2024-12-04 15:21:18,451 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-04 15:21:18,467 main DEBUG Took 0.013542 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-04 15:21:18,467 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-04 15:21:18,468 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-04 15:21:18,469 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-04 15:21:18,471 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:21:18,480 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-04 15:21:18,495 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:21:18,497 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:21:18,498 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:21:18,498 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:21:18,499 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:21:18,499 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:21:18,500 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:21:18,501 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:21:18,501 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:21:18,502 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:21:18,503 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:21:18,503 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:21:18,504 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:21:18,504 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-04 15:21:18,505 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:21:18,505 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:21:18,506 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:21:18,506 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:21:18,507 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:21:18,507 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:21:18,508 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:21:18,508 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:21:18,509 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:21:18,509 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:21:18,510 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:21:18,510 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-04 15:21:18,512 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:21:18,514 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-04 15:21:18,516 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-04 15:21:18,517 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-04 15:21:18,518 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-04 15:21:18,519 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-04 15:21:18,529 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-04 15:21:18,532 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-04 15:21:18,534 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-04 15:21:18,535 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-04 15:21:18,535 main DEBUG createAppenders(={Console}) 2024-12-04 15:21:18,536 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-04 15:21:18,537 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-04 15:21:18,537 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-04 15:21:18,538 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-04 15:21:18,538 main DEBUG OutputStream closed 2024-12-04 15:21:18,538 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-04 15:21:18,539 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-04 15:21:18,539 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-04 15:21:18,633 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-04 15:21:18,636 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-04 15:21:18,638 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-04 15:21:18,639 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-04 15:21:18,640 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-04 15:21:18,640 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-04 15:21:18,641 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-04 15:21:18,641 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-04 15:21:18,642 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-04 15:21:18,642 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-04 15:21:18,643 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-04 15:21:18,643 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-04 15:21:18,644 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-04 15:21:18,644 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-04 15:21:18,645 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-04 15:21:18,645 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-04 15:21:18,646 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-04 15:21:18,647 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-04 15:21:18,649 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-04 15:21:18,650 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-04 15:21:18,650 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-04 15:21:18,651 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-04T15:21:19,029 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf 2024-12-04 15:21:19,033 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-04 15:21:19,034 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-04T15:21:19,052 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-04T15:21:19,080 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-04T15:21:19,085 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/cluster_56e0ac68-583d-08f0-32b8-9e10e2ab879d, deleteOnExit=true 2024-12-04T15:21:19,085 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-04T15:21:19,086 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/test.cache.data in system properties and HBase conf 2024-12-04T15:21:19,087 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/hadoop.tmp.dir in system properties and HBase conf 2024-12-04T15:21:19,088 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/hadoop.log.dir in system properties and HBase conf 2024-12-04T15:21:19,089 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-04T15:21:19,090 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-04T15:21:19,090 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-04T15:21:19,217 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-04T15:21:19,357 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-04T15:21:19,361 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-04T15:21:19,362 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-04T15:21:19,362 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-04T15:21:19,363 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T15:21:19,363 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-04T15:21:19,364 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-04T15:21:19,365 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T15:21:19,365 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T15:21:19,366 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-04T15:21:19,366 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/nfs.dump.dir in system properties and HBase conf 2024-12-04T15:21:19,367 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/java.io.tmpdir in system properties and HBase conf 2024-12-04T15:21:19,368 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T15:21:19,368 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-04T15:21:19,369 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-04T15:21:20,273 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-04T15:21:20,355 INFO [Time-limited test {}] log.Log(170): Logging initialized @2731ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-04T15:21:20,442 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T15:21:20,514 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T15:21:20,561 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T15:21:20,562 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T15:21:20,564 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T15:21:20,585 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T15:21:20,588 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@644d9c1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/hadoop.log.dir/,AVAILABLE} 2024-12-04T15:21:20,589 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@12fea530{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T15:21:20,807 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@38fd023f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/java.io.tmpdir/jetty-localhost-42253-hadoop-hdfs-3_4_1-tests_jar-_-any-944757006810336955/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T15:21:20,814 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@509a85ef{HTTP/1.1, (http/1.1)}{localhost:42253} 2024-12-04T15:21:20,814 INFO [Time-limited test {}] server.Server(415): Started @3191ms 2024-12-04T15:21:21,249 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T15:21:21,259 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T15:21:21,261 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T15:21:21,261 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T15:21:21,262 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T15:21:21,263 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13c4bd01{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/hadoop.log.dir/,AVAILABLE} 2024-12-04T15:21:21,263 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2da77e5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T15:21:21,390 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@646fdc50{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/java.io.tmpdir/jetty-localhost-35561-hadoop-hdfs-3_4_1-tests_jar-_-any-2246438482464013638/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T15:21:21,391 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@22319d81{HTTP/1.1, (http/1.1)}{localhost:35561} 2024-12-04T15:21:21,391 INFO [Time-limited test {}] server.Server(415): Started @3769ms 2024-12-04T15:21:21,455 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T15:21:22,000 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/cluster_56e0ac68-583d-08f0-32b8-9e10e2ab879d/dfs/data/data2/current/BP-1019263298-172.17.0.2-1733325680011/current, will proceed with Du for space computation calculation, 2024-12-04T15:21:22,000 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/cluster_56e0ac68-583d-08f0-32b8-9e10e2ab879d/dfs/data/data1/current/BP-1019263298-172.17.0.2-1733325680011/current, will proceed with Du for space computation calculation, 2024-12-04T15:21:22,056 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T15:21:22,137 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc2eb992b5effa27b with lease ID 0x95cc75e1a7d6ca5d: Processing first storage report for DS-2c1a47d8-c707-40eb-978b-e639aed32bc2 from datanode DatanodeRegistration(127.0.0.1:35183, datanodeUuid=b02f84e7-d7eb-4d71-b2d2-cf5c4ca9a4cf, infoPort=35985, infoSecurePort=0, ipcPort=35225, storageInfo=lv=-57;cid=testClusterID;nsid=2104634560;c=1733325680011) 2024-12-04T15:21:22,138 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc2eb992b5effa27b with lease ID 0x95cc75e1a7d6ca5d: from storage DS-2c1a47d8-c707-40eb-978b-e639aed32bc2 node DatanodeRegistration(127.0.0.1:35183, datanodeUuid=b02f84e7-d7eb-4d71-b2d2-cf5c4ca9a4cf, infoPort=35985, infoSecurePort=0, ipcPort=35225, storageInfo=lv=-57;cid=testClusterID;nsid=2104634560;c=1733325680011), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-04T15:21:22,139 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc2eb992b5effa27b with lease ID 0x95cc75e1a7d6ca5d: Processing first storage report for DS-d0188fbe-641f-4ba6-b76b-f5c6b6e248f8 from datanode DatanodeRegistration(127.0.0.1:35183, datanodeUuid=b02f84e7-d7eb-4d71-b2d2-cf5c4ca9a4cf, infoPort=35985, infoSecurePort=0, ipcPort=35225, storageInfo=lv=-57;cid=testClusterID;nsid=2104634560;c=1733325680011) 2024-12-04T15:21:22,139 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc2eb992b5effa27b with lease ID 0x95cc75e1a7d6ca5d: from storage DS-d0188fbe-641f-4ba6-b76b-f5c6b6e248f8 node DatanodeRegistration(127.0.0.1:35183, datanodeUuid=b02f84e7-d7eb-4d71-b2d2-cf5c4ca9a4cf, infoPort=35985, infoSecurePort=0, ipcPort=35225, storageInfo=lv=-57;cid=testClusterID;nsid=2104634560;c=1733325680011), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T15:21:22,225 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf 
2024-12-04T15:21:22,328 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/cluster_56e0ac68-583d-08f0-32b8-9e10e2ab879d/zookeeper_0, clientPort=55739, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/cluster_56e0ac68-583d-08f0-32b8-9e10e2ab879d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/cluster_56e0ac68-583d-08f0-32b8-9e10e2ab879d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-04T15:21:22,339 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=55739 2024-12-04T15:21:22,350 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:21:22,353 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:21:22,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741825_1001 (size=7) 2024-12-04T15:21:23,007 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c with version=8 2024-12-04T15:21:23,008 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/hbase-staging 2024-12-04T15:21:23,136 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-04T15:21:23,407 INFO [Time-limited test {}] client.ConnectionUtils(129): master/645c2dbfef2e:0 server-side Connection retries=45 2024-12-04T15:21:23,426 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:21:23,427 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T15:21:23,427 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T15:21:23,427 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:21:23,428 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T15:21:23,569 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T15:21:23,630 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-04T15:21:23,639 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-04T15:21:23,642 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T15:21:23,670 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 7128 (auto-detected) 2024-12-04T15:21:23,671 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-04T15:21:23,691 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33167 2024-12-04T15:21:23,699 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:21:23,701 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:21:23,713 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:33167 connecting to ZooKeeper ensemble=127.0.0.1:55739 2024-12-04T15:21:23,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:331670x0, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T15:21:23,747 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33167-0x1005d9eaf690000 connected 2024-12-04T15:21:23,773 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T15:21:23,775 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:21:23,778 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T15:21:23,784 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33167 2024-12-04T15:21:23,784 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33167 2024-12-04T15:21:23,785 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33167 2024-12-04T15:21:23,785 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33167 2024-12-04T15:21:23,786 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33167 
2024-12-04T15:21:23,795 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c, hbase.cluster.distributed=false 2024-12-04T15:21:23,857 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/645c2dbfef2e:0 server-side Connection retries=45 2024-12-04T15:21:23,858 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:21:23,858 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T15:21:23,858 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T15:21:23,858 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:21:23,858 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T15:21:23,861 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T15:21:23,863 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T15:21:23,864 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42169 2024-12-04T15:21:23,866 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T15:21:23,871 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T15:21:23,873 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:21:23,876 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:21:23,879 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42169 connecting to ZooKeeper ensemble=127.0.0.1:55739 2024-12-04T15:21:23,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:421690x0, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T15:21:23,883 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:421690x0, quorum=127.0.0.1:55739, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T15:21:23,884 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42169-0x1005d9eaf690001 connected 2024-12-04T15:21:23,885 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:21:23,888 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T15:21:23,889 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42169 2024-12-04T15:21:23,889 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42169 2024-12-04T15:21:23,892 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42169 2024-12-04T15:21:23,893 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42169 2024-12-04T15:21:23,896 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42169 2024-12-04T15:21:23,898 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/645c2dbfef2e,33167,1733325683129 2024-12-04T15:21:23,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:21:23,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:21:23,907 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/645c2dbfef2e,33167,1733325683129 2024-12-04T15:21:23,914 DEBUG [M:0;645c2dbfef2e:33167 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;645c2dbfef2e:33167 2024-12-04T15:21:23,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T15:21:23,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T15:21:23,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:21:23,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:21:23,929 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T15:21:23,930 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(111): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T15:21:23,930 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/645c2dbfef2e,33167,1733325683129 from backup master directory 2024-12-04T15:21:23,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/645c2dbfef2e,33167,1733325683129 2024-12-04T15:21:23,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:21:23,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:21:23,933 WARN [master/645c2dbfef2e:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T15:21:23,934 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=645c2dbfef2e,33167,1733325683129 2024-12-04T15:21:23,936 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-04T15:21:23,937 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-04T15:21:24,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741826_1002 (size=42) 2024-12-04T15:21:24,409 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/hbase.id with ID: 03da07d4-d406-4884-a651-2f117f2c62a2 2024-12-04T15:21:24,454 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:21:24,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:21:24,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:21:24,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741827_1003 (size=196) 2024-12-04T15:21:24,918 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME 
=> 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T15:21:24,921 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-04T15:21:24,941 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:24,946 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-04T15:21:24,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741828_1004 (size=1189) 2024-12-04T15:21:25,399 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store 2024-12-04T15:21:25,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741829_1005 (size=34) 2024-12-04T15:21:25,825 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-04T15:21:25,825 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:21:25,827 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T15:21:25,827 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T15:21:25,827 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T15:21:25,827 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T15:21:25,827 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T15:21:25,827 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T15:21:25,828 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-04T15:21:25,830 WARN [master/645c2dbfef2e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/.initializing 2024-12-04T15:21:25,830 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/WALs/645c2dbfef2e,33167,1733325683129 2024-12-04T15:21:25,837 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-04T15:21:25,848 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=645c2dbfef2e%2C33167%2C1733325683129, suffix=, logDir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/WALs/645c2dbfef2e,33167,1733325683129, archiveDir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/oldWALs, maxLogs=10 2024-12-04T15:21:25,869 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/WALs/645c2dbfef2e,33167,1733325683129/645c2dbfef2e%2C33167%2C1733325683129.1733325685853, exclude list is [], retry=0 2024-12-04T15:21:25,890 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35183,DS-2c1a47d8-c707-40eb-978b-e639aed32bc2,DISK] 2024-12-04T15:21:25,894 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-04T15:21:25,937 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/WALs/645c2dbfef2e,33167,1733325683129/645c2dbfef2e%2C33167%2C1733325683129.1733325685853 2024-12-04T15:21:25,938 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35985:35985)] 2024-12-04T15:21:25,939 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:21:25,939 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:21:25,942 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:21:25,943 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:21:25,981 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:21:26,005 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-04T15:21:26,009 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:26,011 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:21:26,012 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:21:26,016 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-04T15:21:26,016 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:26,017 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:21:26,017 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:21:26,021 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-04T15:21:26,021 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:26,023 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:21:26,023 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:21:26,026 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-04T15:21:26,027 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:26,028 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:21:26,032 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:21:26,034 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:21:26,043 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T15:21:26,047 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:21:26,052 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T15:21:26,053 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61935439, jitterRate=-0.07709003984928131}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T15:21:26,058 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-04T15:21:26,059 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-04T15:21:26,088 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61e23004, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:26,122 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
2024-12-04T15:21:26,134 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-04T15:21:26,134 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-04T15:21:26,136 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-04T15:21:26,138 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-04T15:21:26,143 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-12-04T15:21:26,143 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-04T15:21:26,174 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-04T15:21:26,189 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-04T15:21:26,192 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-04T15:21:26,195 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-04T15:21:26,196 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-04T15:21:26,198 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-04T15:21:26,201 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-04T15:21:26,205 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-04T15:21:26,207 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-04T15:21:26,208 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-04T15:21:26,210 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-04T15:21:26,220 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-04T15:21:26,222 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-04T15:21:26,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T15:21:26,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T15:21:26,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:21:26,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:21:26,227 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=645c2dbfef2e,33167,1733325683129, sessionid=0x1005d9eaf690000, setting cluster-up flag (Was=false) 2024-12-04T15:21:26,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:21:26,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:21:26,246 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-04T15:21:26,248 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=645c2dbfef2e,33167,1733325683129 2024-12-04T15:21:26,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:21:26,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:21:26,260 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-04T15:21:26,261 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=645c2dbfef2e,33167,1733325683129 2024-12-04T15:21:26,313 DEBUG [RS:0;645c2dbfef2e:42169 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;645c2dbfef2e:42169 2024-12-04T15:21:26,314 INFO 
[RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(1008): ClusterId : 03da07d4-d406-4884-a651-2f117f2c62a2 2024-12-04T15:21:26,317 DEBUG [RS:0;645c2dbfef2e:42169 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T15:21:26,322 DEBUG [RS:0;645c2dbfef2e:42169 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T15:21:26,322 DEBUG [RS:0;645c2dbfef2e:42169 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T15:21:26,325 DEBUG [RS:0;645c2dbfef2e:42169 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T15:21:26,326 DEBUG [RS:0;645c2dbfef2e:42169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@761997ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:26,327 DEBUG [RS:0;645c2dbfef2e:42169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cc7fcff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=645c2dbfef2e/172.17.0.2:0 2024-12-04T15:21:26,330 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-04T15:21:26,330 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-04T15:21:26,331 DEBUG [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-04T15:21:26,333 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(3073): reportForDuty to master=645c2dbfef2e,33167,1733325683129 with isa=645c2dbfef2e/172.17.0.2:42169, startcode=1733325683856 2024-12-04T15:21:26,347 DEBUG [RS:0;645c2dbfef2e:42169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T15:21:26,363 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-04T15:21:26,372 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-04T15:21:26,377 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-04T15:21:26,383 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 645c2dbfef2e,33167,1733325683129 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-04T15:21:26,386 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/645c2dbfef2e:0, corePoolSize=5, maxPoolSize=5 2024-12-04T15:21:26,386 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/645c2dbfef2e:0, corePoolSize=5, maxPoolSize=5 2024-12-04T15:21:26,386 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/645c2dbfef2e:0, corePoolSize=5, maxPoolSize=5 2024-12-04T15:21:26,387 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/645c2dbfef2e:0, corePoolSize=5, maxPoolSize=5 2024-12-04T15:21:26,387 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/645c2dbfef2e:0, corePoolSize=10, maxPoolSize=10 2024-12-04T15:21:26,387 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/645c2dbfef2e:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:21:26,387 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/645c2dbfef2e:0, corePoolSize=2, maxPoolSize=2 2024-12-04T15:21:26,387 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/645c2dbfef2e:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:21:26,390 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35717, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T15:21:26,393 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733325716393 2024-12-04T15:21:26,393 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-04T15:21:26,394 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-04T15:21:26,395 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-04T15:21:26,396 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-04T15:21:26,398 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33167 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at 
org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:26,399 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:26,400 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T15:21:26,401 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-04T15:21:26,401 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-04T15:21:26,402 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-04T15:21:26,402 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-04T15:21:26,404 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-04T15:21:26,409 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-04T15:21:26,410 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-04T15:21:26,411 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-04T15:21:26,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741831_1007 (size=1039) 2024-12-04T15:21:26,416 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-04T15:21:26,416 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-04T15:21:26,420 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/645c2dbfef2e:0:becomeActiveMaster-HFileCleaner.large.0-1733325686417,5,FailOnTimeoutGroup] 2024-12-04T15:21:26,422 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/645c2dbfef2e:0:becomeActiveMaster-HFileCleaner.small.0-1733325686420,5,FailOnTimeoutGroup] 2024-12-04T15:21:26,422 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T15:21:26,422 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-04T15:21:26,424 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-04T15:21:26,424 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-04T15:21:26,440 DEBUG [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-04T15:21:26,440 WARN [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-12-04T15:21:26,542 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(3073): reportForDuty to master=645c2dbfef2e,33167,1733325683129 with isa=645c2dbfef2e/172.17.0.2:42169, startcode=1733325683856 2024-12-04T15:21:26,544 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33167 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:26,546 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33167 {}] master.ServerManager(486): Registering regionserver=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:26,555 DEBUG [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c 2024-12-04T15:21:26,555 DEBUG [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:38975 2024-12-04T15:21:26,555 DEBUG [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-04T15:21:26,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T15:21:26,560 DEBUG [RS:0;645c2dbfef2e:42169 {}] zookeeper.ZKUtil(111): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:26,560 WARN [RS:0;645c2dbfef2e:42169 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T15:21:26,561 INFO [RS:0;645c2dbfef2e:42169 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-04T15:21:26,561 DEBUG [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/WALs/645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:26,563 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [645c2dbfef2e,42169,1733325683856] 2024-12-04T15:21:26,574 DEBUG [RS:0;645c2dbfef2e:42169 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-04T15:21:26,586 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T15:21:26,598 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T15:21:26,600 INFO [RS:0;645c2dbfef2e:42169 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T15:21:26,601 INFO [RS:0;645c2dbfef2e:42169 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-04T15:21:26,601 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-04T15:21:26,608 INFO [RS:0;645c2dbfef2e:42169 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-04T15:21:26,608 DEBUG [RS:0;645c2dbfef2e:42169 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/645c2dbfef2e:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:21:26,608 DEBUG [RS:0;645c2dbfef2e:42169 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/645c2dbfef2e:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:21:26,608 DEBUG [RS:0;645c2dbfef2e:42169 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/645c2dbfef2e:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:21:26,609 DEBUG [RS:0;645c2dbfef2e:42169 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/645c2dbfef2e:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:21:26,609 DEBUG [RS:0;645c2dbfef2e:42169 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/645c2dbfef2e:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:21:26,609 DEBUG [RS:0;645c2dbfef2e:42169 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/645c2dbfef2e:0, corePoolSize=2, maxPoolSize=2 2024-12-04T15:21:26,609 DEBUG [RS:0;645c2dbfef2e:42169 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:21:26,609 DEBUG [RS:0;645c2dbfef2e:42169 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/645c2dbfef2e:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:21:26,609 DEBUG [RS:0;645c2dbfef2e:42169 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/645c2dbfef2e:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:21:26,609 DEBUG [RS:0;645c2dbfef2e:42169 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/645c2dbfef2e:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:21:26,610 DEBUG [RS:0;645c2dbfef2e:42169 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/645c2dbfef2e:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:21:26,610 DEBUG [RS:0;645c2dbfef2e:42169 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/645c2dbfef2e:0, corePoolSize=3, maxPoolSize=3 2024-12-04T15:21:26,610 DEBUG [RS:0;645c2dbfef2e:42169 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0, corePoolSize=3, maxPoolSize=3 2024-12-04T15:21:26,611 INFO [RS:0;645c2dbfef2e:42169 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T15:21:26,611 INFO [RS:0;645c2dbfef2e:42169 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T15:21:26,611 INFO [RS:0;645c2dbfef2e:42169 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-04T15:21:26,611 INFO [RS:0;645c2dbfef2e:42169 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T15:21:26,612 INFO [RS:0;645c2dbfef2e:42169 {}] hbase.ChoreService(168): Chore ScheduledChore name=645c2dbfef2e,42169,1733325683856-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T15:21:26,642 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T15:21:26,645 INFO [RS:0;645c2dbfef2e:42169 {}] hbase.ChoreService(168): Chore ScheduledChore name=645c2dbfef2e,42169,1733325683856-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:21:26,665 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.Replication(204): 645c2dbfef2e,42169,1733325683856 started 2024-12-04T15:21:26,666 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(1767): Serving as 645c2dbfef2e,42169,1733325683856, RpcServer on 645c2dbfef2e/172.17.0.2:42169, sessionid=0x1005d9eaf690001 2024-12-04T15:21:26,666 DEBUG [RS:0;645c2dbfef2e:42169 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T15:21:26,666 DEBUG [RS:0;645c2dbfef2e:42169 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:26,666 DEBUG [RS:0;645c2dbfef2e:42169 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '645c2dbfef2e,42169,1733325683856' 2024-12-04T15:21:26,666 DEBUG [RS:0;645c2dbfef2e:42169 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T15:21:26,667 DEBUG [RS:0;645c2dbfef2e:42169 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T15:21:26,668 DEBUG [RS:0;645c2dbfef2e:42169 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T15:21:26,668 DEBUG [RS:0;645c2dbfef2e:42169 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T15:21:26,668 DEBUG [RS:0;645c2dbfef2e:42169 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:26,668 DEBUG [RS:0;645c2dbfef2e:42169 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '645c2dbfef2e,42169,1733325683856' 2024-12-04T15:21:26,668 DEBUG [RS:0;645c2dbfef2e:42169 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T15:21:26,669 DEBUG [RS:0;645c2dbfef2e:42169 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T15:21:26,669 DEBUG [RS:0;645c2dbfef2e:42169 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T15:21:26,670 INFO [RS:0;645c2dbfef2e:42169 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T15:21:26,670 INFO [RS:0;645c2dbfef2e:42169 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-04T15:21:26,775 INFO [RS:0;645c2dbfef2e:42169 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-04T15:21:26,779 INFO [RS:0;645c2dbfef2e:42169 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=645c2dbfef2e%2C42169%2C1733325683856, suffix=, logDir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/WALs/645c2dbfef2e,42169,1733325683856, archiveDir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/oldWALs, maxLogs=32 2024-12-04T15:21:26,797 DEBUG [RS:0;645c2dbfef2e:42169 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/WALs/645c2dbfef2e,42169,1733325683856/645c2dbfef2e%2C42169%2C1733325683856.1733325686782, exclude list is [], retry=0 2024-12-04T15:21:26,802 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35183,DS-2c1a47d8-c707-40eb-978b-e639aed32bc2,DISK] 2024-12-04T15:21:26,806 INFO [RS:0;645c2dbfef2e:42169 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/WALs/645c2dbfef2e,42169,1733325683856/645c2dbfef2e%2C42169%2C1733325683856.1733325686782 2024-12-04T15:21:26,806 DEBUG [RS:0;645c2dbfef2e:42169 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35985:35985)] 2024-12-04T15:21:26,818 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-04T15:21:26,818 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c 2024-12-04T15:21:26,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741833_1009 (size=32) 2024-12-04T15:21:27,234 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:21:27,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T15:21:27,240 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T15:21:27,240 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:27,241 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:21:27,241 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T15:21:27,244 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T15:21:27,244 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:27,245 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:21:27,245 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T15:21:27,247 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T15:21:27,247 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:27,248 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:21:27,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740 2024-12-04T15:21:27,251 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740 2024-12-04T15:21:27,254 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-04T15:21:27,257 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-04T15:21:27,262 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T15:21:27,263 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75445578, jitterRate=0.12422671914100647}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T15:21:27,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-04T15:21:27,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-04T15:21:27,267 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-04T15:21:27,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-04T15:21:27,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T15:21:27,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T15:21:27,269 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-04T15:21:27,269 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-04T15:21:27,272 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-04T15:21:27,272 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-04T15:21:27,278 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-04T15:21:27,286 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T15:21:27,289 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-04T15:21:27,442 DEBUG [645c2dbfef2e:33167 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-04T15:21:27,448 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:27,454 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 645c2dbfef2e,42169,1733325683856, state=OPENING 2024-12-04T15:21:27,461 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-04T15:21:27,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:21:27,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:21:27,464 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:21:27,464 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:21:27,466 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:21:27,641 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:27,644 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T15:21:27,648 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55666, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T15:21:27,659 INFO [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-04T15:21:27,659 INFO [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-04T15:21:27,660 INFO [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-04T15:21:27,663 INFO [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=645c2dbfef2e%2C42169%2C1733325683856.meta, suffix=.meta, logDir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/WALs/645c2dbfef2e,42169,1733325683856, archiveDir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/oldWALs, maxLogs=32 2024-12-04T15:21:27,690 DEBUG [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/WALs/645c2dbfef2e,42169,1733325683856/645c2dbfef2e%2C42169%2C1733325683856.meta.1733325687666.meta, exclude list is [], retry=0 2024-12-04T15:21:27,694 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35183,DS-2c1a47d8-c707-40eb-978b-e639aed32bc2,DISK] 2024-12-04T15:21:27,698 INFO [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/WALs/645c2dbfef2e,42169,1733325683856/645c2dbfef2e%2C42169%2C1733325683856.meta.1733325687666.meta 2024-12-04T15:21:27,698 DEBUG [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:35985:35985)] 2024-12-04T15:21:27,699 DEBUG [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:21:27,700 DEBUG [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-04T15:21:27,761 DEBUG [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-04T15:21:27,766 INFO [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-04T15:21:27,770 DEBUG [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-04T15:21:27,770 DEBUG [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:21:27,771 DEBUG [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-04T15:21:27,771 DEBUG [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-04T15:21:27,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T15:21:27,776 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T15:21:27,776 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:27,778 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:21:27,778 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T15:21:27,780 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T15:21:27,780 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:27,781 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:21:27,781 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T15:21:27,783 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T15:21:27,783 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:27,784 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:21:27,785 DEBUG [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740 2024-12-04T15:21:27,788 DEBUG [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740 2024-12-04T15:21:27,791 DEBUG [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T15:21:27,795 DEBUG [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-04T15:21:27,796 INFO [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64753973, jitterRate=-0.0350906103849411}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T15:21:27,798 DEBUG [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-04T15:21:27,806 INFO [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733325687636 2024-12-04T15:21:27,819 DEBUG [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-04T15:21:27,819 INFO [RS_OPEN_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-04T15:21:27,820 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:27,822 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 645c2dbfef2e,42169,1733325683856, state=OPEN 2024-12-04T15:21:27,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T15:21:27,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T15:21:27,829 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:21:27,829 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:21:27,837 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-04T15:21:27,838 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=645c2dbfef2e,42169,1733325683856 in 363 msec 2024-12-04T15:21:27,844 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-04T15:21:27,845 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure 
table=hbase:meta, region=1588230740, ASSIGN in 561 msec 2024-12-04T15:21:27,852 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.5480 sec 2024-12-04T15:21:27,852 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733325687852, completionTime=-1 2024-12-04T15:21:27,853 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-04T15:21:27,853 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-04T15:21:27,891 DEBUG [hconnection-0x656e3dc1-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:27,894 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55672, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:27,904 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-04T15:21:27,905 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733325747904 2024-12-04T15:21:27,905 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733325807905 2024-12-04T15:21:27,905 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 51 msec 2024-12-04T15:21:27,926 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=645c2dbfef2e,33167,1733325683129-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:21:27,926 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=645c2dbfef2e,33167,1733325683129-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:21:27,926 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=645c2dbfef2e,33167,1733325683129-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:21:27,927 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-645c2dbfef2e:33167, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:21:27,928 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-04T15:21:27,933 DEBUG [master/645c2dbfef2e:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-04T15:21:27,936 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-04T15:21:27,937 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T15:21:27,944 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-04T15:21:27,947 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T15:21:27,948 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:27,950 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T15:21:27,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741835_1011 (size=358) 2024-12-04T15:21:28,366 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 369c833ab6e0e1ae5f4d743d2988012a, NAME => 'hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c 2024-12-04T15:21:28,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741836_1012 (size=42) 2024-12-04T15:21:28,376 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:21:28,376 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 369c833ab6e0e1ae5f4d743d2988012a, disabling compactions & flushes 2024-12-04T15:21:28,377 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a. 2024-12-04T15:21:28,377 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a. 2024-12-04T15:21:28,377 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a. 
after waiting 0 ms 2024-12-04T15:21:28,377 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a. 2024-12-04T15:21:28,377 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a. 2024-12-04T15:21:28,377 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 369c833ab6e0e1ae5f4d743d2988012a: 2024-12-04T15:21:28,379 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T15:21:28,386 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733325688380"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733325688380"}]},"ts":"1733325688380"} 2024-12-04T15:21:28,443 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-04T15:21:28,446 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T15:21:28,449 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325688446"}]},"ts":"1733325688446"} 2024-12-04T15:21:28,455 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-04T15:21:28,461 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=369c833ab6e0e1ae5f4d743d2988012a, ASSIGN}] 2024-12-04T15:21:28,464 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=369c833ab6e0e1ae5f4d743d2988012a, ASSIGN 2024-12-04T15:21:28,465 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=369c833ab6e0e1ae5f4d743d2988012a, ASSIGN; state=OFFLINE, location=645c2dbfef2e,42169,1733325683856; forceNewPlan=false, retain=false 2024-12-04T15:21:28,616 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=369c833ab6e0e1ae5f4d743d2988012a, regionState=OPENING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:28,620 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 369c833ab6e0e1ae5f4d743d2988012a, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:21:28,775 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:28,782 INFO [RS_OPEN_PRIORITY_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a. 2024-12-04T15:21:28,782 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 369c833ab6e0e1ae5f4d743d2988012a, NAME => 'hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a.', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:21:28,783 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 369c833ab6e0e1ae5f4d743d2988012a 2024-12-04T15:21:28,783 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:21:28,783 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 369c833ab6e0e1ae5f4d743d2988012a 2024-12-04T15:21:28,783 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 369c833ab6e0e1ae5f4d743d2988012a 2024-12-04T15:21:28,786 INFO [StoreOpener-369c833ab6e0e1ae5f4d743d2988012a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 369c833ab6e0e1ae5f4d743d2988012a 2024-12-04T15:21:28,788 INFO [StoreOpener-369c833ab6e0e1ae5f4d743d2988012a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 369c833ab6e0e1ae5f4d743d2988012a columnFamilyName info 2024-12-04T15:21:28,788 DEBUG [StoreOpener-369c833ab6e0e1ae5f4d743d2988012a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:28,789 INFO [StoreOpener-369c833ab6e0e1ae5f4d743d2988012a-1 {}] regionserver.HStore(327): Store=369c833ab6e0e1ae5f4d743d2988012a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:21:28,791 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/namespace/369c833ab6e0e1ae5f4d743d2988012a 2024-12-04T15:21:28,792 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/namespace/369c833ab6e0e1ae5f4d743d2988012a 2024-12-04T15:21:28,796 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 369c833ab6e0e1ae5f4d743d2988012a 2024-12-04T15:21:28,804 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/namespace/369c833ab6e0e1ae5f4d743d2988012a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T15:21:28,805 INFO [RS_OPEN_PRIORITY_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 369c833ab6e0e1ae5f4d743d2988012a; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62333756, jitterRate=-0.0711546540260315}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-04T15:21:28,807 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 369c833ab6e0e1ae5f4d743d2988012a: 2024-12-04T15:21:28,810 INFO [RS_OPEN_PRIORITY_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a., pid=6, masterSystemTime=1733325688775 2024-12-04T15:21:28,814 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a. 2024-12-04T15:21:28,814 INFO [RS_OPEN_PRIORITY_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a. 
2024-12-04T15:21:28,815 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=369c833ab6e0e1ae5f4d743d2988012a, regionState=OPEN, openSeqNum=2, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:28,824 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-04T15:21:28,826 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 369c833ab6e0e1ae5f4d743d2988012a, server=645c2dbfef2e,42169,1733325683856 in 199 msec 2024-12-04T15:21:28,828 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-04T15:21:28,828 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=369c833ab6e0e1ae5f4d743d2988012a, ASSIGN in 363 msec 2024-12-04T15:21:28,830 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T15:21:28,830 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325688830"}]},"ts":"1733325688830"} 2024-12-04T15:21:28,833 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-04T15:21:28,837 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T15:21:28,840 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 899 msec 2024-12-04T15:21:28,848 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-04T15:21:28,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-04T15:21:28,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:21:28,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:21:28,884 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-04T15:21:28,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-04T15:21:28,905 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 26 msec 2024-12-04T15:21:28,918 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-04T15:21:28,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-04T15:21:28,935 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 16 msec 2024-12-04T15:21:28,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-04T15:21:28,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-04T15:21:28,948 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 5.013sec 2024-12-04T15:21:28,949 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-04T15:21:28,950 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-04T15:21:28,951 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-04T15:21:28,952 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-04T15:21:28,952 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-04T15:21:28,953 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=645c2dbfef2e,33167,1733325683129-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T15:21:28,953 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=645c2dbfef2e,33167,1733325683129-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-04T15:21:28,960 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-04T15:21:28,961 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-04T15:21:28,962 INFO [master/645c2dbfef2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=645c2dbfef2e,33167,1733325683129-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
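The two CreateNamespaceProcedure runs above create the built-in 'default' and 'hbase' namespaces during master start-up. If a test wanted a namespace of its own, the equivalent client call is Admin.createNamespace; the class and the name 'acid_test' below are made up for illustration and are not part of this test run.

    import java.io.IOException;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;

    public final class NamespaceSketch {
      // Sketch only: creates a user namespace; on the master this runs as a
      // CreateNamespaceProcedure like the ones logged above.
      static void createTestNamespace(Admin admin) throws IOException {
        admin.createNamespace(NamespaceDescriptor.create("acid_test").build());
      }
    }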
2024-12-04T15:21:29,030 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x731b646e to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18714851 2024-12-04T15:21:29,031 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-04T15:21:29,042 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c593042, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:29,047 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-04T15:21:29,047 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-04T15:21:29,056 DEBUG [hconnection-0x5ac72b8-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:29,066 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55688, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:29,077 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=645c2dbfef2e,33167,1733325683129 2024-12-04T15:21:29,096 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=865, ProcessCount=11, AvailableMemoryMB=4692 2024-12-04T15:21:29,109 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T15:21:29,112 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52700, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T15:21:29,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
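The ReadOnlyZKClient lines above show the test client reaching the mini cluster's ZooKeeper quorum at 127.0.0.1:55739, and the ZKConnectionRegistry warning points at the newer RpcConnectionRegistry. Below is a minimal, hypothetical sketch of obtaining a client Connection to such a cluster through the classic ZooKeeper settings; the quorum and port are taken from the log, everything else (class name, printout) is illustrative and not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MiniClusterClientSketch {
      public static void main(String[] args) throws Exception {
        // Point the client at the quorum/port seen in the log (127.0.0.1:55739).
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "55739");
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          System.out.println("connected: " + !connection.isClosed());
        }
      }
    }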
2024-12-04T15:21:29,122 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T15:21:29,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-04T15:21:29,127 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T15:21:29,128 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:29,130 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-04T15:21:29,130 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T15:21:29,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-04T15:21:29,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741837_1013 (size=963) 2024-12-04T15:21:29,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-04T15:21:29,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-04T15:21:29,549 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c 2024-12-04T15:21:29,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741838_1014 (size=53) 2024-12-04T15:21:29,563 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:21:29,563 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing e2e19d2bb9bfcadbc1f5e0b910706700, disabling compactions & flushes 2024-12-04T15:21:29,563 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:29,563 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:29,563 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. after waiting 0 ms 2024-12-04T15:21:29,563 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:29,563 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:29,563 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:29,565 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T15:21:29,566 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733325689565"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733325689565"}]},"ts":"1733325689565"} 2024-12-04T15:21:29,569 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
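The create request logged above carries the full TestAcidGuarantees descriptor: families A, B and C with VERSIONS => '1', 64 KB blocks, and the table-level attribute hbase.hregion.compacting.memstore.type => 'ADAPTIVE'. A hedged sketch of building an equivalent descriptor with the public client API follows; the class and helper name are hypothetical, not the code TestAcidGuaranteesWithAdaptivePolicy actually runs, and the CreateTableProcedure states visible around this point (WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS) are the server-side steps the same call walks through.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class AcidTableSketch {
      // Hypothetical helper mirroring the descriptor logged by HMaster above.
      static void createAcidTable(Admin admin) throws IOException {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table-level attribute behind the adaptive CompactingMemStore.
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] {"A", "B", "C"}) {
          table.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)      // VERSIONS => '1'
              .setBlocksize(65536)    // BLOCKSIZE => '65536 B (64KB)'
              .build());
        }
        // Submitted to the master as a CreateTableProcedure (pid=9 in this log).
        admin.createTable(table.build());
      }
    }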
2024-12-04T15:21:29,570 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T15:21:29,571 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325689571"}]},"ts":"1733325689571"} 2024-12-04T15:21:29,573 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-04T15:21:29,578 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e2e19d2bb9bfcadbc1f5e0b910706700, ASSIGN}] 2024-12-04T15:21:29,579 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e2e19d2bb9bfcadbc1f5e0b910706700, ASSIGN 2024-12-04T15:21:29,581 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=e2e19d2bb9bfcadbc1f5e0b910706700, ASSIGN; state=OFFLINE, location=645c2dbfef2e,42169,1733325683856; forceNewPlan=false, retain=false 2024-12-04T15:21:29,732 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=e2e19d2bb9bfcadbc1f5e0b910706700, regionState=OPENING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:29,735 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:21:29,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-04T15:21:29,889 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:29,896 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:29,896 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:21:29,897 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:29,897 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:21:29,897 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:29,897 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:29,899 INFO [StoreOpener-e2e19d2bb9bfcadbc1f5e0b910706700-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:29,903 INFO [StoreOpener-e2e19d2bb9bfcadbc1f5e0b910706700-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:21:29,903 INFO [StoreOpener-e2e19d2bb9bfcadbc1f5e0b910706700-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2e19d2bb9bfcadbc1f5e0b910706700 columnFamilyName A 2024-12-04T15:21:29,903 DEBUG [StoreOpener-e2e19d2bb9bfcadbc1f5e0b910706700-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:29,904 INFO [StoreOpener-e2e19d2bb9bfcadbc1f5e0b910706700-1 {}] regionserver.HStore(327): Store=e2e19d2bb9bfcadbc1f5e0b910706700/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:21:29,904 INFO [StoreOpener-e2e19d2bb9bfcadbc1f5e0b910706700-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:29,907 INFO [StoreOpener-e2e19d2bb9bfcadbc1f5e0b910706700-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:21:29,907 INFO [StoreOpener-e2e19d2bb9bfcadbc1f5e0b910706700-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2e19d2bb9bfcadbc1f5e0b910706700 columnFamilyName B 2024-12-04T15:21:29,907 DEBUG [StoreOpener-e2e19d2bb9bfcadbc1f5e0b910706700-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:29,908 INFO [StoreOpener-e2e19d2bb9bfcadbc1f5e0b910706700-1 {}] regionserver.HStore(327): Store=e2e19d2bb9bfcadbc1f5e0b910706700/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:21:29,909 INFO [StoreOpener-e2e19d2bb9bfcadbc1f5e0b910706700-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:29,911 INFO [StoreOpener-e2e19d2bb9bfcadbc1f5e0b910706700-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:21:29,912 INFO [StoreOpener-e2e19d2bb9bfcadbc1f5e0b910706700-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2e19d2bb9bfcadbc1f5e0b910706700 columnFamilyName C 2024-12-04T15:21:29,912 DEBUG [StoreOpener-e2e19d2bb9bfcadbc1f5e0b910706700-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:29,913 INFO [StoreOpener-e2e19d2bb9bfcadbc1f5e0b910706700-1 {}] regionserver.HStore(327): Store=e2e19d2bb9bfcadbc1f5e0b910706700/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:21:29,913 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:29,915 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:29,915 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:29,918 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T15:21:29,920 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:29,925 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T15:21:29,926 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened e2e19d2bb9bfcadbc1f5e0b910706700; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64872171, jitterRate=-0.033329322934150696}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T15:21:29,927 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:29,929 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., pid=11, masterSystemTime=1733325689889 2024-12-04T15:21:29,932 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:29,932 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
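The StoreOpener lines above show each of A, B and C instantiated as a CompactingMemStore with compactor=ADAPTIVE and a 2.00 MB in-memory flush threshold, driven by the table-level attribute in the descriptor. As an assumption for illustration only, the same policy can also be requested per column family through the descriptor builder; the helper below is hypothetical and not part of this test.

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class AdaptiveFamilySketch {
      // Sketch: request ADAPTIVE in-memory compaction on a single family.
      static ColumnFamilyDescriptor adaptiveFamily(String name) {
        return ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(name))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            .build();
      }
    }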
2024-12-04T15:21:29,933 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=e2e19d2bb9bfcadbc1f5e0b910706700, regionState=OPEN, openSeqNum=2, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:29,939 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-04T15:21:29,940 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 in 201 msec 2024-12-04T15:21:29,943 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-04T15:21:29,943 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e2e19d2bb9bfcadbc1f5e0b910706700, ASSIGN in 362 msec 2024-12-04T15:21:29,944 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T15:21:29,944 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325689944"}]},"ts":"1733325689944"} 2024-12-04T15:21:29,947 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-04T15:21:29,951 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T15:21:29,954 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 828 msec 2024-12-04T15:21:30,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-04T15:21:30,249 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-04T15:21:30,256 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1a212a13 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1a27982 2024-12-04T15:21:30,259 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c36b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:30,262 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:30,264 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55696, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:30,268 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T15:21:30,270 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52702, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T15:21:30,277 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6584bdfe to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d769f52 2024-12-04T15:21:30,281 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3990cf61, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:30,282 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1ecd23be to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@27f8a33a 2024-12-04T15:21:30,286 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f58386, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:30,287 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f7de97f to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7dbc4f37 2024-12-04T15:21:30,290 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50f80ab5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:30,291 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x731457d6 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6cce6b74 2024-12-04T15:21:30,294 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d4c8261, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:30,295 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x451dda6c to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17ee117b 2024-12-04T15:21:30,299 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ee8c66d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:30,300 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x496ef540 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e4ca7fd 2024-12-04T15:21:30,305 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b8adb84, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:30,306 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x334e87ba to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4846bd61 2024-12-04T15:21:30,309 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25fd1916, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:30,310 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3738df30 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a405568 2024-12-04T15:21:30,313 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10a95a5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:30,314 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d88de0e to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@159b2dc2 2024-12-04T15:21:30,317 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f3afe97, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:30,332 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:21:30,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-04T15:21:30,338 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:21:30,339 DEBUG [hconnection-0x4360c845-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:30,340 DEBUG [hconnection-0x703098f9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:30,340 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:21:30,340 DEBUG [hconnection-0x42186a3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:30,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-04T15:21:30,341 DEBUG [hconnection-0x4872f2f9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:30,341 DEBUG [hconnection-0x7c0bf2ac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:30,342 DEBUG [hconnection-0x64ccd404-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:30,342 DEBUG [hconnection-0x53f1d8ba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:30,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:21:30,344 DEBUG [hconnection-0x2216ae23-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:30,345 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55702, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:30,345 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55704, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:30,346 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55718, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:30,350 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55734, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:30,351 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55744, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:30,352 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55748, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:30,352 DEBUG [hconnection-0x33871dae-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:30,359 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55774, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:30,360 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55780, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:30,368 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55764, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:30,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-04T15:21:30,508 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-04T15:21:30,508 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:30,518 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:30,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-04T15:21:30,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:30,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:30,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:30,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:30,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:30,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:30,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:30,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:30,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:30,537 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:21:30,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:30,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:30,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-04T15:21:30,688 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/ef8bb7216b284dbc87758cd419ce5461 is 50, key is test_row_0/A:col10/1733325690507/Put/seqid=0 2024-12-04T15:21:30,719 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:30,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-04T15:21:30,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:30,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325750695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:30,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:30,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325750703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:30,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:30,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325750707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:30,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:30,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325750709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:30,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:30,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:30,740 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:30,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:30,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325750719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:30,740 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:30,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741839_1015 (size=16681) 2024-12-04T15:21:30,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:30,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:30,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:30,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325750841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:30,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:30,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:30,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325750842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:30,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:30,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:30,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325750844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:30,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325750845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:30,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325750844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:30,901 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:30,902 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-04T15:21:30,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:30,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:30,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:30,906 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:30,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:30,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:30,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-04T15:21:31,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:31,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325751064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:31,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:31,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325751067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:31,072 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:31,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:31,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325751067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:31,076 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-04T15:21:31,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:31,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:31,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:31,076 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:21:31,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:31,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:31,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:31,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325751076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:31,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:31,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325751075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:31,144 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/ef8bb7216b284dbc87758cd419ce5461 2024-12-04T15:21:31,236 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:31,237 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-04T15:21:31,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:31,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:31,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:31,237 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:31,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:31,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:31,261 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/30e4f0c9c3094328b4dfd718d8feaa57 is 50, key is test_row_0/B:col10/1733325690507/Put/seqid=0 2024-12-04T15:21:31,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741840_1016 (size=12001) 2024-12-04T15:21:31,320 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/30e4f0c9c3094328b4dfd718d8feaa57 2024-12-04T15:21:31,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:31,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325751371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:31,384 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/1c120041837043a98c3668e9605a8152 is 50, key is test_row_0/C:col10/1733325690507/Put/seqid=0 2024-12-04T15:21:31,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:31,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325751375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:31,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:31,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:31,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325751382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:31,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325751385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:31,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:31,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325751387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:31,395 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:31,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-04T15:21:31,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:31,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:31,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:31,397 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:21:31,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:31,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:31,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741841_1017 (size=12001) 2024-12-04T15:21:31,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/1c120041837043a98c3668e9605a8152 2024-12-04T15:21:31,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/ef8bb7216b284dbc87758cd419ce5461 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ef8bb7216b284dbc87758cd419ce5461 2024-12-04T15:21:31,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-04T15:21:31,462 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ef8bb7216b284dbc87758cd419ce5461, entries=250, sequenceid=16, filesize=16.3 K 2024-12-04T15:21:31,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/30e4f0c9c3094328b4dfd718d8feaa57 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/30e4f0c9c3094328b4dfd718d8feaa57 2024-12-04T15:21:31,482 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/30e4f0c9c3094328b4dfd718d8feaa57, entries=150, sequenceid=16, filesize=11.7 K 2024-12-04T15:21:31,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/1c120041837043a98c3668e9605a8152 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/1c120041837043a98c3668e9605a8152 2024-12-04T15:21:31,497 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/1c120041837043a98c3668e9605a8152, entries=150, sequenceid=16, filesize=11.7 K 2024-12-04T15:21:31,499 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 
e2e19d2bb9bfcadbc1f5e0b910706700 in 991ms, sequenceid=16, compaction requested=false 2024-12-04T15:21:31,501 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-04T15:21:31,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:31,551 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:31,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-04T15:21:31,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:31,553 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-04T15:21:31,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:31,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:31,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:31,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:31,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:31,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:31,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/52242619bf1f40aca9f9ba0d16d91eaa is 50, key is test_row_0/A:col10/1733325690700/Put/seqid=0 2024-12-04T15:21:31,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741842_1018 (size=12001) 2024-12-04T15:21:31,645 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=38 (bloomFilter=true), 
to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/52242619bf1f40aca9f9ba0d16d91eaa 2024-12-04T15:21:31,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/0001fdca5f1649d9815a141c13dd224f is 50, key is test_row_0/B:col10/1733325690700/Put/seqid=0 2024-12-04T15:21:31,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741843_1019 (size=12001) 2024-12-04T15:21:31,722 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/0001fdca5f1649d9815a141c13dd224f 2024-12-04T15:21:31,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/7d8897002c134c9c956145be7be2f041 is 50, key is test_row_0/C:col10/1733325690700/Put/seqid=0 2024-12-04T15:21:31,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741844_1020 (size=12001) 2024-12-04T15:21:31,812 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/7d8897002c134c9c956145be7be2f041 2024-12-04T15:21:31,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/52242619bf1f40aca9f9ba0d16d91eaa as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/52242619bf1f40aca9f9ba0d16d91eaa 2024-12-04T15:21:31,842 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/52242619bf1f40aca9f9ba0d16d91eaa, entries=150, sequenceid=38, filesize=11.7 K 2024-12-04T15:21:31,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/0001fdca5f1649d9815a141c13dd224f as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0001fdca5f1649d9815a141c13dd224f 2024-12-04T15:21:31,859 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0001fdca5f1649d9815a141c13dd224f, entries=150, sequenceid=38, filesize=11.7 K 2024-12-04T15:21:31,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/7d8897002c134c9c956145be7be2f041 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7d8897002c134c9c956145be7be2f041 2024-12-04T15:21:31,878 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7d8897002c134c9c956145be7be2f041, entries=150, sequenceid=38, filesize=11.7 K 2024-12-04T15:21:31,884 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for e2e19d2bb9bfcadbc1f5e0b910706700 in 332ms, sequenceid=38, compaction requested=false 2024-12-04T15:21:31,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:31,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:31,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-04T15:21:31,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-04T15:21:31,890 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-04T15:21:31,891 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5450 sec 2024-12-04T15:21:31,895 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.5590 sec 2024-12-04T15:21:31,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:31,932 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-04T15:21:31,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:31,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:31,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:31,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:31,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:31,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:31,947 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/b437dbcd80d84e528eef635ef28fbc32 is 50, key is test_row_0/A:col10/1733325691930/Put/seqid=0 2024-12-04T15:21:31,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741845_1021 (size=19021) 2024-12-04T15:21:32,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325752045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325752052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325752056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325752059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325752064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325752167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325752174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325752175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,191 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325752186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325752186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/b437dbcd80d84e528eef635ef28fbc32 2024-12-04T15:21:32,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325752389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325752392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,407 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325752393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325752395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325752397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,435 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/316c156b65414e1faced5ef02c14bafc is 50, key is test_row_0/B:col10/1733325691930/Put/seqid=0 2024-12-04T15:21:32,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-04T15:21:32,453 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-04T15:21:32,457 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:21:32,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-04T15:21:32,460 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:21:32,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-04T15:21:32,462 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-12-04T15:21:32,463 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:21:32,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741846_1022 (size=12001) 2024-12-04T15:21:32,485 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/316c156b65414e1faced5ef02c14bafc 2024-12-04T15:21:32,515 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/f3246de1084f4664ac24feb4aa4e2597 is 50, key is test_row_0/C:col10/1733325691930/Put/seqid=0 2024-12-04T15:21:32,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741847_1023 (size=12001) 2024-12-04T15:21:32,548 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/f3246de1084f4664ac24feb4aa4e2597 2024-12-04T15:21:32,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/b437dbcd80d84e528eef635ef28fbc32 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b437dbcd80d84e528eef635ef28fbc32 2024-12-04T15:21:32,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-04T15:21:32,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b437dbcd80d84e528eef635ef28fbc32, entries=300, sequenceid=49, filesize=18.6 K 2024-12-04T15:21:32,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/316c156b65414e1faced5ef02c14bafc as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/316c156b65414e1faced5ef02c14bafc 2024-12-04T15:21:32,579 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-04T15:21:32,580 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-04T15:21:32,590 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/316c156b65414e1faced5ef02c14bafc, entries=150, sequenceid=49, filesize=11.7 K 2024-12-04T15:21:32,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/f3246de1084f4664ac24feb4aa4e2597 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/f3246de1084f4664ac24feb4aa4e2597 2024-12-04T15:21:32,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/f3246de1084f4664ac24feb4aa4e2597, entries=150, sequenceid=49, filesize=11.7 K 2024-12-04T15:21:32,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for e2e19d2bb9bfcadbc1f5e0b910706700 in 676ms, sequenceid=49, compaction requested=true 2024-12-04T15:21:32,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:32,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:21:32,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:32,617 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:21:32,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:32,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:21:32,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:32,617 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-04T15:21:32,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:32,618 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-04T15:21:32,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:32,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:32,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:32,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:32,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:32,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:32,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/b04ae325d5384cabbdc0d1da9e9ef1f6 is 50, key is test_row_0/A:col10/1733325692028/Put/seqid=0 2024-12-04T15:21:32,635 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:32,642 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:32,645 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:32,647 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/B is initiating minor compaction (all files) 2024-12-04T15:21:32,647 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/B in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:32,648 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/30e4f0c9c3094328b4dfd718d8feaa57, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0001fdca5f1649d9815a141c13dd224f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/316c156b65414e1faced5ef02c14bafc] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=35.2 K 2024-12-04T15:21:32,649 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 30e4f0c9c3094328b4dfd718d8feaa57, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733325690452 2024-12-04T15:21:32,650 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0001fdca5f1649d9815a141c13dd224f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733325690696 2024-12-04T15:21:32,651 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 316c156b65414e1faced5ef02c14bafc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733325691917 2024-12-04T15:21:32,651 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 47703 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:32,652 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/A is initiating minor compaction (all files) 2024-12-04T15:21:32,652 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/A in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:32,652 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ef8bb7216b284dbc87758cd419ce5461, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/52242619bf1f40aca9f9ba0d16d91eaa, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b437dbcd80d84e528eef635ef28fbc32] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=46.6 K 2024-12-04T15:21:32,653 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef8bb7216b284dbc87758cd419ce5461, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733325690409 2024-12-04T15:21:32,654 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52242619bf1f40aca9f9ba0d16d91eaa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733325690696 2024-12-04T15:21:32,655 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b437dbcd80d84e528eef635ef28fbc32, keycount=300, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733325691917 2024-12-04T15:21:32,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741848_1024 (size=12001) 2024-12-04T15:21:32,691 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#A#compaction#10 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:32,693 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/6209b51762b4457eb598b5b0578378cc is 50, key is test_row_0/A:col10/1733325691930/Put/seqid=0 2024-12-04T15:21:32,694 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#B#compaction#11 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:32,695 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/08132e240abe4ef0b171abcf320cca72 is 50, key is test_row_0/B:col10/1733325691930/Put/seqid=0 2024-12-04T15:21:32,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
as already flushing 2024-12-04T15:21:32,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:32,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741849_1025 (size=12104) 2024-12-04T15:21:32,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325752745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325752746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325752748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325752750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741850_1026 (size=12104) 2024-12-04T15:21:32,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325752757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-04T15:21:32,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325752859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325752860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325752861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325752862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:32,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:32,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325752867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,062 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/b04ae325d5384cabbdc0d1da9e9ef1f6 2024-12-04T15:21:33,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325753068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325753068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325753070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325753070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-04T15:21:33,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325753076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/f64f520bdb854bdfa825e935b4fa97cc is 50, key is test_row_0/B:col10/1733325692028/Put/seqid=0 2024-12-04T15:21:33,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741851_1027 (size=12001) 2024-12-04T15:21:33,128 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/f64f520bdb854bdfa825e935b4fa97cc 2024-12-04T15:21:33,159 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/6209b51762b4457eb598b5b0578378cc as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/6209b51762b4457eb598b5b0578378cc 2024-12-04T15:21:33,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/e01f910056304890995b04d8d0120e2a is 50, key is test_row_0/C:col10/1733325692028/Put/seqid=0 2024-12-04T15:21:33,189 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/08132e240abe4ef0b171abcf320cca72 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/08132e240abe4ef0b171abcf320cca72 2024-12-04T15:21:33,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741852_1028 (size=12001) 2024-12-04T15:21:33,211 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/e01f910056304890995b04d8d0120e2a 2024-12-04T15:21:33,244 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/A of e2e19d2bb9bfcadbc1f5e0b910706700 into 6209b51762b4457eb598b5b0578378cc(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:21:33,244 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/B of e2e19d2bb9bfcadbc1f5e0b910706700 into 08132e240abe4ef0b171abcf320cca72(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:21:33,244 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:33,244 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:33,245 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/B, priority=13, startTime=1733325692617; duration=0sec 2024-12-04T15:21:33,245 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/A, priority=13, startTime=1733325692610; duration=0sec 2024-12-04T15:21:33,245 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:33,245 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:B 2024-12-04T15:21:33,245 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:33,245 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:A 2024-12-04T15:21:33,245 DEBUG 
[RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:33,250 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:33,250 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/C is initiating minor compaction (all files) 2024-12-04T15:21:33,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/b04ae325d5384cabbdc0d1da9e9ef1f6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b04ae325d5384cabbdc0d1da9e9ef1f6 2024-12-04T15:21:33,250 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/C in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:33,250 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/1c120041837043a98c3668e9605a8152, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7d8897002c134c9c956145be7be2f041, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/f3246de1084f4664ac24feb4aa4e2597] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=35.2 K 2024-12-04T15:21:33,252 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c120041837043a98c3668e9605a8152, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733325690452 2024-12-04T15:21:33,253 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d8897002c134c9c956145be7be2f041, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733325690696 2024-12-04T15:21:33,254 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting f3246de1084f4664ac24feb4aa4e2597, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733325691917 2024-12-04T15:21:33,264 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b04ae325d5384cabbdc0d1da9e9ef1f6, entries=150, sequenceid=74, filesize=11.7 K 2024-12-04T15:21:33,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/f64f520bdb854bdfa825e935b4fa97cc as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/f64f520bdb854bdfa825e935b4fa97cc 2024-12-04T15:21:33,281 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/f64f520bdb854bdfa825e935b4fa97cc, entries=150, sequenceid=74, filesize=11.7 K 2024-12-04T15:21:33,281 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#C#compaction#14 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:33,283 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/f847537f0bd84866ac1ed21ffbe55e54 is 50, key is test_row_0/C:col10/1733325691930/Put/seqid=0 2024-12-04T15:21:33,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/e01f910056304890995b04d8d0120e2a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e01f910056304890995b04d8d0120e2a 2024-12-04T15:21:33,285 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-04T15:21:33,294 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e01f910056304890995b04d8d0120e2a, entries=150, sequenceid=74, filesize=11.7 K 2024-12-04T15:21:33,297 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for e2e19d2bb9bfcadbc1f5e0b910706700 in 679ms, sequenceid=74, compaction requested=false 2024-12-04T15:21:33,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:33,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:33,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-04T15:21:33,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-04T15:21:33,304 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-04T15:21:33,304 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 836 msec 2024-12-04T15:21:33,308 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 849 msec 2024-12-04T15:21:33,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741853_1029 (size=12104) 2024-12-04T15:21:33,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:33,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-04T15:21:33,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:33,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:33,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:33,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:33,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:33,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:33,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/2ce0c5b8373a45199dfc3d32db4dbd56 is 50, key is test_row_0/A:col10/1733325692735/Put/seqid=0 2024-12-04T15:21:33,447 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325753434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325753444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325753446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325753455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741854_1030 (size=12001) 2024-12-04T15:21:33,464 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/2ce0c5b8373a45199dfc3d32db4dbd56 2024-12-04T15:21:33,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325753459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,485 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/731b79f5c3814400bf0474eb55fed6f9 is 50, key is test_row_0/B:col10/1733325692735/Put/seqid=0 2024-12-04T15:21:33,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741855_1031 (size=12001) 2024-12-04T15:21:33,507 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/731b79f5c3814400bf0474eb55fed6f9 2024-12-04T15:21:33,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/3146490b8eb0430caeeecd2ad3793874 is 50, key is test_row_0/C:col10/1733325692735/Put/seqid=0 2024-12-04T15:21:33,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325753550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325753554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325753556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325753563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741856_1032 (size=12001) 2024-12-04T15:21:33,570 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/3146490b8eb0430caeeecd2ad3793874 2024-12-04T15:21:33,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325753570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-04T15:21:33,582 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-04T15:21:33,584 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:21:33,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-04T15:21:33,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/2ce0c5b8373a45199dfc3d32db4dbd56 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/2ce0c5b8373a45199dfc3d32db4dbd56 2024-12-04T15:21:33,588 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:21:33,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-04T15:21:33,589 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:21:33,590 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:21:33,600 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/2ce0c5b8373a45199dfc3d32db4dbd56, entries=150, sequenceid=89, filesize=11.7 K 2024-12-04T15:21:33,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/731b79f5c3814400bf0474eb55fed6f9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/731b79f5c3814400bf0474eb55fed6f9 2024-12-04T15:21:33,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/731b79f5c3814400bf0474eb55fed6f9, entries=150, sequenceid=89, filesize=11.7 K 2024-12-04T15:21:33,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/3146490b8eb0430caeeecd2ad3793874 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/3146490b8eb0430caeeecd2ad3793874 2024-12-04T15:21:33,632 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-04T15:21:33,633 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-04T15:21:33,634 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/3146490b8eb0430caeeecd2ad3793874, entries=150, sequenceid=89, filesize=11.7 K 2024-12-04T15:21:33,635 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-04T15:21:33,635 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-04T15:21:33,639 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T15:21:33,639 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-04T15:21:33,639 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-04T15:21:33,639 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-04T15:21:33,641 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-04T15:21:33,641 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-04T15:21:33,646 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for e2e19d2bb9bfcadbc1f5e0b910706700 in 262ms, sequenceid=89, compaction requested=true 2024-12-04T15:21:33,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:33,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:A, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:21:33,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:33,647 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:33,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:B, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:21:33,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:33,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:21:33,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:21:33,649 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:33,650 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/A is initiating minor compaction (all files) 2024-12-04T15:21:33,650 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/A in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:33,650 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/6209b51762b4457eb598b5b0578378cc, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b04ae325d5384cabbdc0d1da9e9ef1f6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/2ce0c5b8373a45199dfc3d32db4dbd56] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=35.3 K 2024-12-04T15:21:33,651 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6209b51762b4457eb598b5b0578378cc, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733325691917 2024-12-04T15:21:33,652 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b04ae325d5384cabbdc0d1da9e9ef1f6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733325692028 2024-12-04T15:21:33,653 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ce0c5b8373a45199dfc3d32db4dbd56, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1733325692735 2024-12-04T15:21:33,691 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#A#compaction#18 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:33,692 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/4a974183deb04377bef74bac737c34f7 is 50, key is test_row_0/A:col10/1733325692735/Put/seqid=0 2024-12-04T15:21:33,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-04T15:21:33,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741857_1033 (size=12207) 2024-12-04T15:21:33,738 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/4a974183deb04377bef74bac737c34f7 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/4a974183deb04377bef74bac737c34f7 2024-12-04T15:21:33,744 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,744 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-04T15:21:33,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:33,745 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-04T15:21:33,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:33,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:33,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:33,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:33,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:33,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:33,751 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/f847537f0bd84866ac1ed21ffbe55e54 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/f847537f0bd84866ac1ed21ffbe55e54 2024-12-04T15:21:33,751 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/A of e2e19d2bb9bfcadbc1f5e0b910706700 into 4a974183deb04377bef74bac737c34f7(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:21:33,751 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:33,751 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/A, priority=13, startTime=1733325693646; duration=0sec 2024-12-04T15:21:33,751 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:21:33,751 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:A 2024-12-04T15:21:33,752 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 3 compacting, 2 eligible, 16 blocking 2024-12-04T15:21:33,754 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-04T15:21:33,754 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-04T15:21:33,754 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. because compaction request was cancelled 2024-12-04T15:21:33,754 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:C 2024-12-04T15:21:33,755 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:33,757 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:33,757 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/B is initiating minor compaction (all files) 2024-12-04T15:21:33,757 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/B in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:33,758 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/08132e240abe4ef0b171abcf320cca72, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/f64f520bdb854bdfa825e935b4fa97cc, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/731b79f5c3814400bf0474eb55fed6f9] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=35.3 K 2024-12-04T15:21:33,759 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08132e240abe4ef0b171abcf320cca72, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733325691917 2024-12-04T15:21:33,761 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f64f520bdb854bdfa825e935b4fa97cc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733325692028 2024-12-04T15:21:33,763 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 731b79f5c3814400bf0474eb55fed6f9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1733325692735 2024-12-04T15:21:33,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/4183440fcdd54d8fbd75abdce6abb62d is 50, key is test_row_0/A:col10/1733325693443/Put/seqid=0 2024-12-04T15:21:33,767 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/C of e2e19d2bb9bfcadbc1f5e0b910706700 into f847537f0bd84866ac1ed21ffbe55e54(size=11.8 K), total size for store is 35.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:21:33,767 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:33,767 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/C, priority=13, startTime=1733325692617; duration=0sec 2024-12-04T15:21:33,767 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:33,767 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:C 2024-12-04T15:21:33,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:33,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:33,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741858_1034 (size=12001) 2024-12-04T15:21:33,788 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#B#compaction#20 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:33,789 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/92ae75732eca410494b462b58069491d is 50, key is test_row_0/B:col10/1733325692735/Put/seqid=0 2024-12-04T15:21:33,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325753792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325753792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325753797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325753800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325753797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741859_1035 (size=12207) 2024-12-04T15:21:33,845 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/92ae75732eca410494b462b58069491d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/92ae75732eca410494b462b58069491d 2024-12-04T15:21:33,857 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/B of e2e19d2bb9bfcadbc1f5e0b910706700 into 92ae75732eca410494b462b58069491d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:21:33,857 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:33,857 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/B, priority=13, startTime=1733325693647; duration=0sec 2024-12-04T15:21:33,858 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:33,858 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:B 2024-12-04T15:21:33,904 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325753903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325753903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-04T15:21:33,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325753905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325753906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:33,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:33,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325753908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325754107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325754108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325754115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325754112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325754122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,185 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/4183440fcdd54d8fbd75abdce6abb62d 2024-12-04T15:21:34,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-04T15:21:34,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/a237298a1bc94fc8aeb82efdd68a1ebd is 50, key is test_row_0/B:col10/1733325693443/Put/seqid=0 2024-12-04T15:21:34,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741860_1036 (size=12001) 2024-12-04T15:21:34,258 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/a237298a1bc94fc8aeb82efdd68a1ebd 2024-12-04T15:21:34,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/5bde765ba4ff412f9f92ff9758dd746e is 50, key is test_row_0/C:col10/1733325693443/Put/seqid=0 2024-12-04T15:21:34,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741861_1037 (size=12001)
2024-12-04T15:21:34,321 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/5bde765ba4ff412f9f92ff9758dd746e 2024-12-04T15:21:34,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/4183440fcdd54d8fbd75abdce6abb62d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/4183440fcdd54d8fbd75abdce6abb62d 2024-12-04T15:21:34,377 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/4183440fcdd54d8fbd75abdce6abb62d, entries=150, sequenceid=113, filesize=11.7 K 2024-12-04T15:21:34,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/a237298a1bc94fc8aeb82efdd68a1ebd as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/a237298a1bc94fc8aeb82efdd68a1ebd 2024-12-04T15:21:34,405 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/a237298a1bc94fc8aeb82efdd68a1ebd, entries=150, sequenceid=113, filesize=11.7 K 2024-12-04T15:21:34,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/5bde765ba4ff412f9f92ff9758dd746e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/5bde765ba4ff412f9f92ff9758dd746e 2024-12-04T15:21:34,419 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/5bde765ba4ff412f9f92ff9758dd746e, entries=150, sequenceid=113, filesize=11.7 K 2024-12-04T15:21:34,420 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for e2e19d2bb9bfcadbc1f5e0b910706700 in 675ms, sequenceid=113, compaction requested=true 2024-12-04T15:21:34,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700:
2024-12-04T15:21:34,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:34,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-04T15:21:34,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-04T15:21:34,427 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-04T15:21:34,427 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 834 msec 2024-12-04T15:21:34,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:34,429 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-04T15:21:34,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:34,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:34,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:34,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:34,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:34,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:34,436 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 846 msec 2024-12-04T15:21:34,451 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/97eb499b0aeb431b8f53a9bb11d485a9 is 50, key is test_row_0/A:col10/1733325694429/Put/seqid=0 2024-12-04T15:21:34,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325754475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325754478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325754478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325754481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325754482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741862_1038 (size=12101) 2024-12-04T15:21:34,495 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/97eb499b0aeb431b8f53a9bb11d485a9 2024-12-04T15:21:34,527 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/32b77f5667ea434499164eafbc687ed5 is 50, key is test_row_0/B:col10/1733325694429/Put/seqid=0 2024-12-04T15:21:34,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741863_1039 (size=12101) 2024-12-04T15:21:34,559 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/32b77f5667ea434499164eafbc687ed5 2024-12-04T15:21:34,579 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/430d9cc676034c99b8818f7cb767ee91 is 50, key is test_row_0/C:col10/1733325694429/Put/seqid=0
2024-12-04T15:21:34,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325754588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325754590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325754593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325754594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741864_1040 (size=12101) 2024-12-04T15:21:34,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325754591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,609 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/430d9cc676034c99b8818f7cb767ee91 2024-12-04T15:21:34,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/97eb499b0aeb431b8f53a9bb11d485a9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/97eb499b0aeb431b8f53a9bb11d485a9 2024-12-04T15:21:34,634 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/97eb499b0aeb431b8f53a9bb11d485a9, entries=150, sequenceid=130, filesize=11.8 K 2024-12-04T15:21:34,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/32b77f5667ea434499164eafbc687ed5 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/32b77f5667ea434499164eafbc687ed5 2024-12-04T15:21:34,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/32b77f5667ea434499164eafbc687ed5, entries=150, sequenceid=130, filesize=11.8 K 2024-12-04T15:21:34,648 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/430d9cc676034c99b8818f7cb767ee91 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/430d9cc676034c99b8818f7cb767ee91 2024-12-04T15:21:34,668 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/430d9cc676034c99b8818f7cb767ee91, entries=150, sequenceid=130, filesize=11.8 K 2024-12-04T15:21:34,670 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for e2e19d2bb9bfcadbc1f5e0b910706700 in 241ms, sequenceid=130, compaction requested=true 2024-12-04T15:21:34,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:34,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:21:34,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:34,670 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:34,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:21:34,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:34,671 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:34,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:21:34,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:34,673 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:34,673 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/B is initiating minor compaction (all files) 2024-12-04T15:21:34,673 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/B in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:34,674 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/92ae75732eca410494b462b58069491d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/a237298a1bc94fc8aeb82efdd68a1ebd, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/32b77f5667ea434499164eafbc687ed5] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=35.5 K 2024-12-04T15:21:34,674 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:34,674 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/A is initiating minor compaction (all files) 2024-12-04T15:21:34,675 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 92ae75732eca410494b462b58069491d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1733325692735 2024-12-04T15:21:34,675 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/A in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:34,675 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/4a974183deb04377bef74bac737c34f7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/4183440fcdd54d8fbd75abdce6abb62d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/97eb499b0aeb431b8f53a9bb11d485a9] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=35.5 K 2024-12-04T15:21:34,675 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting a237298a1bc94fc8aeb82efdd68a1ebd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1733325693437 2024-12-04T15:21:34,676 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 32b77f5667ea434499164eafbc687ed5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733325693791 2024-12-04T15:21:34,676 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a974183deb04377bef74bac737c34f7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1733325692735 2024-12-04T15:21:34,677 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4183440fcdd54d8fbd75abdce6abb62d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1733325693437 2024-12-04T15:21:34,678 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97eb499b0aeb431b8f53a9bb11d485a9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733325693791 2024-12-04T15:21:34,707 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#A#compaction#26 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:34,708 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/504d34d2bcff46379fcb0d0f3ce3f0d4 is 50, key is test_row_0/A:col10/1733325694429/Put/seqid=0 2024-12-04T15:21:34,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-04T15:21:34,710 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-04T15:21:34,712 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:21:34,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-04T15:21:34,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-04T15:21:34,715 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:21:34,718 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:21:34,718 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:21:34,719 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#B#compaction#27 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:34,720 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/05c495ef7b484f1eb4846729a0a323be is 50, key is test_row_0/B:col10/1733325694429/Put/seqid=0 2024-12-04T15:21:34,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741865_1041 (size=12409) 2024-12-04T15:21:34,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741866_1042 (size=12409) 2024-12-04T15:21:34,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:34,802 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-04T15:21:34,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:34,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:34,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:34,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:34,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:34,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:34,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-04T15:21:34,827 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/a6d3e5aa7d4c4a07b2ec7a3a837b3cae is 50, key is test_row_0/A:col10/1733325694798/Put/seqid=0 2024-12-04T15:21:34,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325754823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325754821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325754826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325754840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325754840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741867_1043 (size=14541) 2024-12-04T15:21:34,862 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/a6d3e5aa7d4c4a07b2ec7a3a837b3cae 2024-12-04T15:21:34,871 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,873 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-04T15:21:34,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:34,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:34,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:34,874 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:34,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:34,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:34,886 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/f4cba283fd3d4fc8b9f53adcbc5c4a71 is 50, key is test_row_0/B:col10/1733325694798/Put/seqid=0 2024-12-04T15:21:34,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741868_1044 (size=12151) 2024-12-04T15:21:34,919 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/f4cba283fd3d4fc8b9f53adcbc5c4a71 2024-12-04T15:21:34,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325754943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,947 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/b5ef8b9009f04fc8ab9c0084af61eb87 is 50, key is test_row_0/C:col10/1733325694798/Put/seqid=0 2024-12-04T15:21:34,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325754944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325754944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325754950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:34,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325754950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:34,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741869_1045 (size=12151) 2024-12-04T15:21:34,984 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/b5ef8b9009f04fc8ab9c0084af61eb87 2024-12-04T15:21:34,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/a6d3e5aa7d4c4a07b2ec7a3a837b3cae as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/a6d3e5aa7d4c4a07b2ec7a3a837b3cae 2024-12-04T15:21:35,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/a6d3e5aa7d4c4a07b2ec7a3a837b3cae, entries=200, sequenceid=153, filesize=14.2 K 2024-12-04T15:21:35,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/f4cba283fd3d4fc8b9f53adcbc5c4a71 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/f4cba283fd3d4fc8b9f53adcbc5c4a71 2024-12-04T15:21:35,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-04T15:21:35,026 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/f4cba283fd3d4fc8b9f53adcbc5c4a71, entries=150, sequenceid=153, filesize=11.9 K 2024-12-04T15:21:35,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/b5ef8b9009f04fc8ab9c0084af61eb87 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/b5ef8b9009f04fc8ab9c0084af61eb87 2024-12-04T15:21:35,035 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-04T15:21:35,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:35,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:35,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:35,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:35,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:35,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:21:35,053 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/b5ef8b9009f04fc8ab9c0084af61eb87, entries=150, sequenceid=153, filesize=11.9 K 2024-12-04T15:21:35,055 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for e2e19d2bb9bfcadbc1f5e0b910706700 in 253ms, sequenceid=153, compaction requested=true 2024-12-04T15:21:35,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:35,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:A, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:21:35,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:21:35,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:B, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:21:35,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-04T15:21:35,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:21:35,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-04T15:21:35,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:35,155 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-04T15:21:35,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:35,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:35,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:35,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:35,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:35,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:35,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/ef1314cb18b54f1c8613b4a3c4a3a0b4 is 50, key is test_row_0/A:col10/1733325695149/Put/seqid=0 2024-12-04T15:21:35,187 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/05c495ef7b484f1eb4846729a0a323be as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/05c495ef7b484f1eb4846729a0a323be 2024-12-04T15:21:35,190 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,190 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/504d34d2bcff46379fcb0d0f3ce3f0d4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/504d34d2bcff46379fcb0d0f3ce3f0d4 2024-12-04T15:21:35,191 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-04T15:21:35,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:35,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:35,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:35,191 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:21:35,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:35,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:35,203 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/A of e2e19d2bb9bfcadbc1f5e0b910706700 into 504d34d2bcff46379fcb0d0f3ce3f0d4(size=12.1 K), total size for store is 26.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:21:35,203 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:35,203 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/A, priority=13, startTime=1733325694670; duration=0sec 2024-12-04T15:21:35,203 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/B of e2e19d2bb9bfcadbc1f5e0b910706700 into 05c495ef7b484f1eb4846729a0a323be(size=12.1 K), total size for store is 24.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:21:35,203 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:35,203 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/B, priority=13, startTime=1733325694670; duration=0sec 2024-12-04T15:21:35,203 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-04T15:21:35,203 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:A 2024-12-04T15:21:35,203 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-04T15:21:35,203 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:B 2024-12-04T15:21:35,204 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-12-04T15:21:35,214 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72359 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-12-04T15:21:35,214 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/C is initiating minor compaction (all files) 2024-12-04T15:21:35,214 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 6 
compacting, 0 eligible, 16 blocking 2024-12-04T15:21:35,214 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/C in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:35,214 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-04T15:21:35,214 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-04T15:21:35,214 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. because compaction request was cancelled 2024-12-04T15:21:35,215 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:C 2024-12-04T15:21:35,215 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:B 2024-12-04T15:21:35,215 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/f847537f0bd84866ac1ed21ffbe55e54, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e01f910056304890995b04d8d0120e2a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/3146490b8eb0430caeeecd2ad3793874, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/5bde765ba4ff412f9f92ff9758dd746e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/430d9cc676034c99b8818f7cb767ee91, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/b5ef8b9009f04fc8ab9c0084af61eb87] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=70.7 K 2024-12-04T15:21:35,215 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:A 2024-12-04T15:21:35,215 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-04T15:21:35,215 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f847537f0bd84866ac1ed21ffbe55e54, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733325691917 2024-12-04T15:21:35,216 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting e01f910056304890995b04d8d0120e2a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733325692028 2024-12-04T15:21:35,217 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-04T15:21:35,218 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3146490b8eb0430caeeecd2ad3793874, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1733325692735 2024-12-04T15:21:35,218 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-04T15:21:35,218 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. because compaction request was cancelled 2024-12-04T15:21:35,218 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:B 2024-12-04T15:21:35,218 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-04T15:21:35,218 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5bde765ba4ff412f9f92ff9758dd746e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1733325693437 2024-12-04T15:21:35,219 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 430d9cc676034c99b8818f7cb767ee91, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733325693791 2024-12-04T15:21:35,220 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-04T15:21:35,220 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-04T15:21:35,220 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
because compaction request was cancelled 2024-12-04T15:21:35,220 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:A 2024-12-04T15:21:35,220 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5ef8b9009f04fc8ab9c0084af61eb87, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733325694475 2024-12-04T15:21:35,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741870_1046 (size=14541) 2024-12-04T15:21:35,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/ef1314cb18b54f1c8613b4a3c4a3a0b4 2024-12-04T15:21:35,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325755230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,252 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/78133090d2bf42a6ba9cf7d1678a8869 is 50, key is test_row_0/B:col10/1733325695149/Put/seqid=0 2024-12-04T15:21:35,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325755246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325755247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325755247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325755248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741871_1047 (size=12151) 2024-12-04T15:21:35,265 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#C#compaction#33 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:35,266 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/78133090d2bf42a6ba9cf7d1678a8869 2024-12-04T15:21:35,266 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/8dfce65007004fca95890779eb8907b9 is 50, key is test_row_0/C:col10/1733325694798/Put/seqid=0 2024-12-04T15:21:35,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/b90573bbea6740ab86e1a878e2998e5d is 50, key is test_row_0/C:col10/1733325695149/Put/seqid=0 2024-12-04T15:21:35,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-04T15:21:35,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741872_1048 (size=12459) 2024-12-04T15:21:35,345 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,352 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/8dfce65007004fca95890779eb8907b9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/8dfce65007004fca95890779eb8907b9 2024-12-04T15:21:35,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-04T15:21:35,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:35,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:35,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
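[Editor's note] The repeated RegionTooBusyException WARNs above ("Over memstore limit=512.0 K") show the region server rejecting Mutate calls until the in-flight flush drains the memstore. Below is a hedged, application-level sketch of backing off on that exception; table, family, qualifier, and backoff values are illustrative assumptions, and depending on client retry settings the exception may instead surface wrapped in a retries-exhausted exception.

```java
// Minimal sketch: exponential backoff around a put that is rejected while the region
// is over its memstore limit, as the handlers above report.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            long backoffMs = 100;
            while (true) {
                try {
                    table.put(put);   // the HBase client also retries internally
                    break;
                } catch (RegionTooBusyException e) {
                    Thread.sleep(backoffMs);                 // give the flush time to complete
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
        }
    }
}
```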
2024-12-04T15:21:35,357 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:35,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:35,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:35,362 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/C of e2e19d2bb9bfcadbc1f5e0b910706700 into 8dfce65007004fca95890779eb8907b9(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:21:35,362 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:35,362 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/C, priority=10, startTime=1733325695055; duration=0sec 2024-12-04T15:21:35,363 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:35,363 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:C 2024-12-04T15:21:35,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325755357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325755357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325755358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325755370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325755370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741873_1049 (size=12151) 2024-12-04T15:21:35,393 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/b90573bbea6740ab86e1a878e2998e5d 2024-12-04T15:21:35,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/ef1314cb18b54f1c8613b4a3c4a3a0b4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ef1314cb18b54f1c8613b4a3c4a3a0b4 2024-12-04T15:21:35,430 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ef1314cb18b54f1c8613b4a3c4a3a0b4, entries=200, sequenceid=167, filesize=14.2 K 2024-12-04T15:21:35,432 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/78133090d2bf42a6ba9cf7d1678a8869 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/78133090d2bf42a6ba9cf7d1678a8869 2024-12-04T15:21:35,441 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/78133090d2bf42a6ba9cf7d1678a8869, entries=150, sequenceid=167, filesize=11.9 K 2024-12-04T15:21:35,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/b90573bbea6740ab86e1a878e2998e5d as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/b90573bbea6740ab86e1a878e2998e5d 2024-12-04T15:21:35,450 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/b90573bbea6740ab86e1a878e2998e5d, entries=150, sequenceid=167, filesize=11.9 K 2024-12-04T15:21:35,452 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for e2e19d2bb9bfcadbc1f5e0b910706700 in 296ms, sequenceid=167, compaction requested=true 2024-12-04T15:21:35,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:35,452 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:35,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:21:35,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:35,453 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:35,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:21:35,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:35,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:21:35,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:35,454 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41491 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:35,454 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/A is initiating minor compaction (all files) 2024-12-04T15:21:35,454 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/A in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
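[Editor's note] The numbers recurring in this section map to standard store/memstore settings: "Need 3 to initiate" is the minimum file count for a minor compaction, "16 blocking" is the blocking store-file count, and the 512.0 K memstore limit is consistent with a small test flush size multiplied by the memstore block multiplier (default 4) — an inference, not something the log states. The sketch below shows the corresponding configuration keys; the concrete values are illustrative assumptions, not values read from this test.

```java
// Configuration knobs corresponding to the thresholds visible in the log above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionTuning {
    public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // per-region flush trigger (tiny, test-style value)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // writes blocked at flush.size * multiplier
        conf.setInt("hbase.hstore.compaction.min", 3);                  // min store files before a minor compaction
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);             // updates blocked above this many store files
        return conf;
    }
}
```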
2024-12-04T15:21:35,454 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/504d34d2bcff46379fcb0d0f3ce3f0d4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/a6d3e5aa7d4c4a07b2ec7a3a837b3cae, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ef1314cb18b54f1c8613b4a3c4a3a0b4] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=40.5 K 2024-12-04T15:21:35,456 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 504d34d2bcff46379fcb0d0f3ce3f0d4, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733325693791 2024-12-04T15:21:35,457 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:35,457 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6d3e5aa7d4c4a07b2ec7a3a837b3cae, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733325694475 2024-12-04T15:21:35,457 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/B is initiating minor compaction (all files) 2024-12-04T15:21:35,457 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/B in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:35,458 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/05c495ef7b484f1eb4846729a0a323be, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/f4cba283fd3d4fc8b9f53adcbc5c4a71, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/78133090d2bf42a6ba9cf7d1678a8869] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=35.9 K 2024-12-04T15:21:35,458 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef1314cb18b54f1c8613b4a3c4a3a0b4, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733325694817 2024-12-04T15:21:35,459 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 05c495ef7b484f1eb4846729a0a323be, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733325693791 2024-12-04T15:21:35,460 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting f4cba283fd3d4fc8b9f53adcbc5c4a71, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733325694475 2024-12-04T15:21:35,461 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 78133090d2bf42a6ba9cf7d1678a8869, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733325694817 2024-12-04T15:21:35,482 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#A#compaction#35 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:35,483 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/7cd52599af43475a81d66798052aaa2a is 50, key is test_row_0/A:col10/1733325695149/Put/seqid=0 2024-12-04T15:21:35,487 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#B#compaction#36 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
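[Editor's note] At this point CompactSplit has queued minor compactions of stores A and B on its own; for completeness, the hedged sketch below shows how the same table could be compacted on demand and its compaction state polled. The polling loop is an illustrative assumption, not something the test does.

```java
// Sketch: request a compaction for the table and wait until the region server reports
// no compaction in progress, mirroring what CompactSplit schedules automatically above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactAndWait {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.compact(table); // queue a minor compaction, like the system-requested ones above
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(500); // wait for queued compactions to drain
            }
        }
    }
}
```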
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:35,488 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/5437f851b986446792632077a7426374 is 50, key is test_row_0/B:col10/1733325695149/Put/seqid=0 2024-12-04T15:21:35,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741874_1050 (size=12561) 2024-12-04T15:21:35,514 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,516 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-04T15:21:35,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:35,517 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-04T15:21:35,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:35,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:35,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:35,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:35,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:35,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:35,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741875_1051 (size=12561) 2024-12-04T15:21:35,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/3b3e0252e9b4468bb46400e8ecfb5110 is 50, key is test_row_0/A:col10/1733325695229/Put/seqid=0 2024-12-04T15:21:35,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
as already flushing 2024-12-04T15:21:35,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:35,579 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/5437f851b986446792632077a7426374 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/5437f851b986446792632077a7426374 2024-12-04T15:21:35,594 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/B of e2e19d2bb9bfcadbc1f5e0b910706700 into 5437f851b986446792632077a7426374(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:21:35,594 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:35,594 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/B, priority=13, startTime=1733325695453; duration=0sec 2024-12-04T15:21:35,594 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:35,594 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:B 2024-12-04T15:21:35,594 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-04T15:21:35,596 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-04T15:21:35,596 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-04T15:21:35,597 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. because compaction request was cancelled 2024-12-04T15:21:35,597 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:C 2024-12-04T15:21:35,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325755607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325755610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325755614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325755614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741876_1052 (size=12151) 2024-12-04T15:21:35,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325755616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325755718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325755725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325755725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325755725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325755725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-04T15:21:35,921 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/7cd52599af43475a81d66798052aaa2a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7cd52599af43475a81d66798052aaa2a 2024-12-04T15:21:35,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325755936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325755937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,941 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/A of e2e19d2bb9bfcadbc1f5e0b910706700 into 7cd52599af43475a81d66798052aaa2a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
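For context on the repeated RegionTooBusyException warnings above and below: HRegion.checkResources() rejects incoming mutations while the region's memstore is over its blocking limit (512.0 K in this test run) and the in-progress flush has not yet freed space, so writers are expected to back off and retry. The standard HBase client already retries such rejections internally; the sketch below only illustrates the back-off idea for a single Put, and the class name, helper name, retry count and sleep values are illustrative assumptions rather than anything taken from this test.

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
        // Hypothetical helper: retry one Put with exponential back-off while the
        // region keeps rejecting it with RegionTooBusyException (memstore over limit).
        static void putWithBackoff(Table table, Put put, int maxAttempts)
                throws IOException, InterruptedException {
            long sleepMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    return;                              // accepted once the flush catches up
                } catch (RegionTooBusyException e) {
                    if (attempt >= maxAttempts) throw e; // give up after maxAttempts rejections
                    Thread.sleep(sleepMs);               // back off before retrying
                    sleepMs = Math.min(sleepMs * 2, 10_000);
                }
            }
        }

        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection();
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                putWithBackoff(table, put, 10);
            }
        }
    }

Depending on client retry settings, the rejection may instead surface wrapped in a RetriesExhaustedWithDetailsException for batched mutations; the loop above assumes it reaches the caller directly.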
2024-12-04T15:21:35,941 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:35,941 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/A, priority=13, startTime=1733325695452; duration=0sec 2024-12-04T15:21:35,941 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:35,941 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:A 2024-12-04T15:21:35,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325755938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325755941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:35,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:35,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325755942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,022 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/3b3e0252e9b4468bb46400e8ecfb5110 2024-12-04T15:21:36,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/563315c6ea964bee87cc7cc720893bac is 50, key is test_row_0/B:col10/1733325695229/Put/seqid=0 2024-12-04T15:21:36,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741877_1053 (size=12151) 2024-12-04T15:21:36,084 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/563315c6ea964bee87cc7cc720893bac 2024-12-04T15:21:36,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/35e15ac0294f40d0bf84e29927740fe0 is 50, key is test_row_0/C:col10/1733325695229/Put/seqid=0 2024-12-04T15:21:36,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741878_1054 (size=12151) 2024-12-04T15:21:36,133 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/35e15ac0294f40d0bf84e29927740fe0 2024-12-04T15:21:36,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/3b3e0252e9b4468bb46400e8ecfb5110 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/3b3e0252e9b4468bb46400e8ecfb5110
2024-12-04T15:21:36,154 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/3b3e0252e9b4468bb46400e8ecfb5110, entries=150, sequenceid=192, filesize=11.9 K
2024-12-04T15:21:36,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/563315c6ea964bee87cc7cc720893bac as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/563315c6ea964bee87cc7cc720893bac
2024-12-04T15:21:36,167 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/563315c6ea964bee87cc7cc720893bac, entries=150, sequenceid=192, filesize=11.9 K
2024-12-04T15:21:36,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/35e15ac0294f40d0bf84e29927740fe0 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/35e15ac0294f40d0bf84e29927740fe0
2024-12-04T15:21:36,178 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/35e15ac0294f40d0bf84e29927740fe0, entries=150, sequenceid=192, filesize=11.9 K
2024-12-04T15:21:36,180 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for e2e19d2bb9bfcadbc1f5e0b910706700 in 664ms, sequenceid=192, compaction requested=true
2024-12-04T15:21:36,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700:
2024-12-04T15:21:36,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.
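The flush sizes logged above are mutually consistent: each of the three stores A, B and C flushed 42.49 KB of memstore data at sequenceid=192, each committed HFile is 12,151 bytes (reported as filesize=11.9 K), and the region-level summary is ~127.47 KB/130530 bytes, i.e. three equal store flushes. A quick arithmetic check using only the byte counts from the log (the class name is a placeholder):

    public class FlushAccountingCheck {
        public static void main(String[] args) {
            long regionBytes = 130_530;                 // "Finished flush of dataSize ~127.47 KB/130530"
            long perStoreBytes = regionBytes / 3;       // stores A, B and C flushed equal amounts
            double perStoreKb = perStoreBytes / 1024.0; // 43510 bytes, i.e. ~42.49 KB (DefaultStoreFlusher lines)
            double hfileKb = 12_151 / 1024.0;           // ~11.9 K, the filesize of each committed HFile
            System.out.printf("region=%.2f KB, per-store=%.2f KB, hfile=%.2f KB%n",
                    regionBytes / 1024.0, perStoreKb, hfileKb);
        }
    }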
2024-12-04T15:21:36,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-04T15:21:36,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-04T15:21:36,186 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-04T15:21:36,186 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4650 sec 2024-12-04T15:21:36,188 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.4750 sec 2024-12-04T15:21:36,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:36,248 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-04T15:21:36,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:36,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:36,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:36,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:36,251 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:36,251 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:36,259 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/17f4158d7b5847b3a12f802abdc3fa78 is 50, key is test_row_1/A:col10/1733325696246/Put/seqid=0 2024-12-04T15:21:36,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741879_1055 (size=9757) 2024-12-04T15:21:36,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325756288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325756290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,298 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/17f4158d7b5847b3a12f802abdc3fa78 2024-12-04T15:21:36,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325756291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325756292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325756299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,319 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/0aa96b9b63e34dbe942857ff00c43072 is 50, key is test_row_1/B:col10/1733325696246/Put/seqid=0 2024-12-04T15:21:36,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741880_1056 (size=9757) 2024-12-04T15:21:36,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325756405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,409 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325756405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325756405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325756405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325756429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325756610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325756613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325756613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325756618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325756634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,763 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/0aa96b9b63e34dbe942857ff00c43072 2024-12-04T15:21:36,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/e347c1535aff466a8581c38899b6f6b8 is 50, key is test_row_1/C:col10/1733325696246/Put/seqid=0 2024-12-04T15:21:36,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741881_1057 (size=9757) 2024-12-04T15:21:36,831 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/e347c1535aff466a8581c38899b6f6b8 2024-12-04T15:21:36,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-04T15:21:36,838 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-04T15:21:36,841 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:21:36,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-04T15:21:36,844 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:21:36,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-04T15:21:36,845 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:21:36,845 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:21:36,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/17f4158d7b5847b3a12f802abdc3fa78 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/17f4158d7b5847b3a12f802abdc3fa78 2024-12-04T15:21:36,873 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/17f4158d7b5847b3a12f802abdc3fa78, entries=100, sequenceid=210, filesize=9.5 K 2024-12-04T15:21:36,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/0aa96b9b63e34dbe942857ff00c43072 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0aa96b9b63e34dbe942857ff00c43072 2024-12-04T15:21:36,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0aa96b9b63e34dbe942857ff00c43072, entries=100, sequenceid=210, filesize=9.5 K 2024-12-04T15:21:36,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/e347c1535aff466a8581c38899b6f6b8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e347c1535aff466a8581c38899b6f6b8 2024-12-04T15:21:36,894 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e347c1535aff466a8581c38899b6f6b8, entries=100, sequenceid=210, filesize=9.5 K 2024-12-04T15:21:36,904 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for e2e19d2bb9bfcadbc1f5e0b910706700 in 656ms, sequenceid=210, compaction requested=true 2024-12-04T15:21:36,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:36,905 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:36,906 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:36,907 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/A is initiating minor compaction (all files) 2024-12-04T15:21:36,907 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/A in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:36,907 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7cd52599af43475a81d66798052aaa2a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/3b3e0252e9b4468bb46400e8ecfb5110, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/17f4158d7b5847b3a12f802abdc3fa78] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=33.7 K 2024-12-04T15:21:36,908 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7cd52599af43475a81d66798052aaa2a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733325694817 2024-12-04T15:21:36,908 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b3e0252e9b4468bb46400e8ecfb5110, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1733325695228 2024-12-04T15:21:36,912 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17f4158d7b5847b3a12f802abdc3fa78, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733325696246 2024-12-04T15:21:36,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:21:36,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:36,913 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-12-04T15:21:36,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:21:36,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:36,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:21:36,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:36,916 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:36,916 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/B is initiating minor compaction (all files) 2024-12-04T15:21:36,916 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/B in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:36,916 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/5437f851b986446792632077a7426374, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/563315c6ea964bee87cc7cc720893bac, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0aa96b9b63e34dbe942857ff00c43072] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=33.7 K 2024-12-04T15:21:36,917 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 5437f851b986446792632077a7426374, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733325694817 2024-12-04T15:21:36,920 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 563315c6ea964bee87cc7cc720893bac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1733325695228 2024-12-04T15:21:36,920 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0aa96b9b63e34dbe942857ff00c43072, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733325696246 2024-12-04T15:21:36,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:36,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 
2024-12-04T15:21:36,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:36,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:36,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:36,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:36,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:36,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:36,936 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#A#compaction#43 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:36,936 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/a8e8bb85900b4d58a10de8d583d5100e is 50, key is test_row_0/A:col10/1733325695229/Put/seqid=0 2024-12-04T15:21:36,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-04T15:21:36,958 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#B#compaction#44 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:36,958 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/053e08fbc64444f19b1864409af89de9 is 50, key is test_row_0/B:col10/1733325695229/Put/seqid=0 2024-12-04T15:21:36,962 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/66907a41467d4d2092ce2b023129e954 is 50, key is test_row_0/A:col10/1733325696922/Put/seqid=0 2024-12-04T15:21:36,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325756961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325756961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325756962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325756963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:36,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325756963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,998 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:36,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-04T15:21:36,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:36,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:36,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:36,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:36,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741882_1058 (size=12663) 2024-12-04T15:21:37,037 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/a8e8bb85900b4d58a10de8d583d5100e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/a8e8bb85900b4d58a10de8d583d5100e 2024-12-04T15:21:37,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741883_1059 (size=12663) 2024-12-04T15:21:37,047 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/A of e2e19d2bb9bfcadbc1f5e0b910706700 into a8e8bb85900b4d58a10de8d583d5100e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:21:37,047 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:37,047 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/A, priority=13, startTime=1733325696905; duration=0sec 2024-12-04T15:21:37,048 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:37,048 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:A 2024-12-04T15:21:37,048 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:21:37,054 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46518 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:21:37,054 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/C is initiating minor compaction (all files) 2024-12-04T15:21:37,054 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/C in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:37,054 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/8dfce65007004fca95890779eb8907b9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/b90573bbea6740ab86e1a878e2998e5d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/35e15ac0294f40d0bf84e29927740fe0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e347c1535aff466a8581c38899b6f6b8] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=45.4 K 2024-12-04T15:21:37,056 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8dfce65007004fca95890779eb8907b9, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733325694475 2024-12-04T15:21:37,057 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b90573bbea6740ab86e1a878e2998e5d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733325694817 2024-12-04T15:21:37,057 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/053e08fbc64444f19b1864409af89de9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/053e08fbc64444f19b1864409af89de9 2024-12-04T15:21:37,058 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35e15ac0294f40d0bf84e29927740fe0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1733325695228 2024-12-04T15:21:37,059 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e347c1535aff466a8581c38899b6f6b8, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733325696246 2024-12-04T15:21:37,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741884_1060 (size=12151) 2024-12-04T15:21:37,068 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/B of e2e19d2bb9bfcadbc1f5e0b910706700 into 053e08fbc64444f19b1864409af89de9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:21:37,068 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:37,068 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/B, priority=13, startTime=1733325696913; duration=0sec 2024-12-04T15:21:37,069 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:37,069 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:B 2024-12-04T15:21:37,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:37,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325757069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:37,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:37,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325757067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325757070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:37,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325757070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:37,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325757071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,093 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#C#compaction#46 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:37,094 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/f2ba857de0484fb5899943c3fe2f43a8 is 50, key is test_row_0/C:col10/1733325695229/Put/seqid=0 2024-12-04T15:21:37,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741885_1061 (size=12595) 2024-12-04T15:21:37,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-04T15:21:37,148 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/f2ba857de0484fb5899943c3fe2f43a8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/f2ba857de0484fb5899943c3fe2f43a8 2024-12-04T15:21:37,153 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,154 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-04T15:21:37,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:37,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:37,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:37,154 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,166 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/C of e2e19d2bb9bfcadbc1f5e0b910706700 into f2ba857de0484fb5899943c3fe2f43a8(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:21:37,167 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:37,167 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/C, priority=12, startTime=1733325696914; duration=0sec 2024-12-04T15:21:37,167 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:37,167 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:C 2024-12-04T15:21:37,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:37,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325757281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:37,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325757281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:37,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325757281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:37,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325757293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:37,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325757293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,307 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,308 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-04T15:21:37,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:37,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:37,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:37,309 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-04T15:21:37,462 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/66907a41467d4d2092ce2b023129e954 2024-12-04T15:21:37,469 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-04T15:21:37,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:37,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:37,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:37,471 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,479 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/20a55b6a91324218b9f680942d4c44bb is 50, key is test_row_0/B:col10/1733325696922/Put/seqid=0 2024-12-04T15:21:37,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741886_1062 (size=12151) 2024-12-04T15:21:37,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:37,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325757585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:37,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325757587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:37,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325757587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:37,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325757606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:37,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325757617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,625 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-04T15:21:37,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:37,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:37,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:37,627 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,781 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,784 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-04T15:21:37,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:37,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:37,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:37,785 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,916 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/20a55b6a91324218b9f680942d4c44bb 2024-12-04T15:21:37,939 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:37,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/057aa998dce64968b40be5e58cc7d920 is 50, key is test_row_0/C:col10/1733325696922/Put/seqid=0 2024-12-04T15:21:37,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-04T15:21:37,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:37,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:37,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:37,943 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:37,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-04T15:21:37,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741887_1063 (size=12151) 2024-12-04T15:21:37,993 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/057aa998dce64968b40be5e58cc7d920 2024-12-04T15:21:38,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/66907a41467d4d2092ce2b023129e954 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/66907a41467d4d2092ce2b023129e954 2024-12-04T15:21:38,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/66907a41467d4d2092ce2b023129e954, entries=150, sequenceid=234, filesize=11.9 K 2024-12-04T15:21:38,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/20a55b6a91324218b9f680942d4c44bb as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/20a55b6a91324218b9f680942d4c44bb 2024-12-04T15:21:38,021 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/20a55b6a91324218b9f680942d4c44bb, entries=150, sequenceid=234, filesize=11.9 K 2024-12-04T15:21:38,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/057aa998dce64968b40be5e58cc7d920 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/057aa998dce64968b40be5e58cc7d920 2024-12-04T15:21:38,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/057aa998dce64968b40be5e58cc7d920, entries=150, sequenceid=234, filesize=11.9 K 2024-12-04T15:21:38,040 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for e2e19d2bb9bfcadbc1f5e0b910706700 in 1110ms, sequenceid=234, compaction requested=false 2024-12-04T15:21:38,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:38,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:38,094 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-04T15:21:38,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:38,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:38,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:38,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:38,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:38,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:38,098 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,098 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-04T15:21:38,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:38,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:38,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:38,099 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:38,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:21:38,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:21:38,102 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/28d080a2a3d148ba97b17c5d4fa73eef is 50, key is test_row_0/A:col10/1733325696957/Put/seqid=0 2024-12-04T15:21:38,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741888_1064 (size=12151) 2024-12-04T15:21:38,128 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/28d080a2a3d148ba97b17c5d4fa73eef 2024-12-04T15:21:38,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325758139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325758139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325758140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325758141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325758145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/ca66bcbd425e45b38b0d5a705ca168c4 is 50, key is test_row_0/B:col10/1733325696957/Put/seqid=0 2024-12-04T15:21:38,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741889_1065 (size=12151) 2024-12-04T15:21:38,169 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/ca66bcbd425e45b38b0d5a705ca168c4 2024-12-04T15:21:38,181 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/02345b96ce504456912bc08d3a706c02 is 50, key is test_row_0/C:col10/1733325696957/Put/seqid=0 2024-12-04T15:21:38,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741890_1066 (size=12151) 2024-12-04T15:21:38,187 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/02345b96ce504456912bc08d3a706c02 2024-12-04T15:21:38,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/28d080a2a3d148ba97b17c5d4fa73eef as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/28d080a2a3d148ba97b17c5d4fa73eef 2024-12-04T15:21:38,205 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/28d080a2a3d148ba97b17c5d4fa73eef, entries=150, sequenceid=251, filesize=11.9 K 
2024-12-04T15:21:38,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/ca66bcbd425e45b38b0d5a705ca168c4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/ca66bcbd425e45b38b0d5a705ca168c4 2024-12-04T15:21:38,215 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/ca66bcbd425e45b38b0d5a705ca168c4, entries=150, sequenceid=251, filesize=11.9 K 2024-12-04T15:21:38,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/02345b96ce504456912bc08d3a706c02 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/02345b96ce504456912bc08d3a706c02 2024-12-04T15:21:38,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/02345b96ce504456912bc08d3a706c02, entries=150, sequenceid=251, filesize=11.9 K 2024-12-04T15:21:38,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for e2e19d2bb9bfcadbc1f5e0b910706700 in 134ms, sequenceid=251, compaction requested=true 2024-12-04T15:21:38,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:38,228 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:38,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:21:38,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:38,229 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:38,230 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:38,230 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/A is initiating minor compaction (all files) 2024-12-04T15:21:38,230 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/A in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:38,230 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/a8e8bb85900b4d58a10de8d583d5100e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/66907a41467d4d2092ce2b023129e954, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/28d080a2a3d148ba97b17c5d4fa73eef] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=36.1 K 2024-12-04T15:21:38,231 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8e8bb85900b4d58a10de8d583d5100e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733325695229 2024-12-04T15:21:38,232 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:38,232 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/B is initiating minor compaction (all files) 2024-12-04T15:21:38,232 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/B in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:38,232 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/053e08fbc64444f19b1864409af89de9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/20a55b6a91324218b9f680942d4c44bb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/ca66bcbd425e45b38b0d5a705ca168c4] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=36.1 K
2024-12-04T15:21:38,232 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66907a41467d4d2092ce2b023129e954, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733325696284
2024-12-04T15:21:38,233 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 053e08fbc64444f19b1864409af89de9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733325695229
2024-12-04T15:21:38,233 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28d080a2a3d148ba97b17c5d4fa73eef, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733325696957
2024-12-04T15:21:38,234 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 20a55b6a91324218b9f680942d4c44bb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733325696284
2024-12-04T15:21:38,235 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting ca66bcbd425e45b38b0d5a705ca168c4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733325696957
2024-12-04T15:21:38,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:B, priority=-2147483648, current under compaction store size is 2
2024-12-04T15:21:38,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-04T15:21:38,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:C, priority=-2147483648, current under compaction store size is 3
2024-12-04T15:21:38,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-04T15:21:38,250 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#A#compaction#52 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-04T15:21:38,251 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/325431971d9d4ce88ab2f8d5fcd1be25 is 50, key is test_row_0/A:col10/1733325696957/Put/seqid=0
2024-12-04T15:21:38,253 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856
2024-12-04T15:21:38,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700
2024-12-04T15:21:38,254 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21
2024-12-04T15:21:38,254 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB
2024-12-04T15:21:38,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.
2024-12-04T15:21:38,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing
2024-12-04T15:21:38,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.
2024-12-04T15:21:38,254 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21
java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-04T15:21:38,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:38,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:38,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:38,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:38,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:38,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:38,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:38,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:38,259 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#B#compaction#53 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:38,260 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/1aa090ef2c9242068634363cf0453b84 is 50, key is test_row_0/B:col10/1733325696957/Put/seqid=0 2024-12-04T15:21:38,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325758270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325758272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325758273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325758273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,287 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/1cdaf297791a4dfea1bc390716fcb43a is 50, key is test_row_0/A:col10/1733325698254/Put/seqid=0 2024-12-04T15:21:38,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741891_1067 (size=12765) 2024-12-04T15:21:38,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741892_1068 (size=12765) 2024-12-04T15:21:38,336 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/1aa090ef2c9242068634363cf0453b84 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/1aa090ef2c9242068634363cf0453b84 2024-12-04T15:21:38,344 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/B of e2e19d2bb9bfcadbc1f5e0b910706700 into 1aa090ef2c9242068634363cf0453b84(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:21:38,344 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700:
2024-12-04T15:21:38,344 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/B, priority=13, startTime=1733325698229; duration=0sec
2024-12-04T15:21:38,345 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-04T15:21:38,345 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:B
2024-12-04T15:21:38,345 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-04T15:21:38,348 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-04T15:21:38,348 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/C is initiating minor compaction (all files)
2024-12-04T15:21:38,348 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/C in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.
2024-12-04T15:21:38,348 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/f2ba857de0484fb5899943c3fe2f43a8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/057aa998dce64968b40be5e58cc7d920, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/02345b96ce504456912bc08d3a706c02] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=36.0 K
2024-12-04T15:21:38,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741893_1069 (size=12301)
2024-12-04T15:21:38,350 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting f2ba857de0484fb5899943c3fe2f43a8, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733325695229
2024-12-04T15:21:38,351 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 057aa998dce64968b40be5e58cc7d920, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733325696284
2024-12-04T15:21:38,352 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 02345b96ce504456912bc08d3a706c02, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733325696957
2024-12-04T15:21:38,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-04T15:21:38,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325758376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856
2024-12-04T15:21:38,381 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325758378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325758378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325758378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,386 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#C#compaction#55 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:38,387 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/8fa094dafe7e4fb9a84c9ea49899dcb9 is 50, key is test_row_0/C:col10/1733325696957/Put/seqid=0 2024-12-04T15:21:38,407 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-04T15:21:38,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:38,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:38,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:38,409 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:38,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:38,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:38,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741894_1070 (size=12697) 2024-12-04T15:21:38,451 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/8fa094dafe7e4fb9a84c9ea49899dcb9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/8fa094dafe7e4fb9a84c9ea49899dcb9 2024-12-04T15:21:38,461 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/C of e2e19d2bb9bfcadbc1f5e0b910706700 into 8fa094dafe7e4fb9a84c9ea49899dcb9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:21:38,461 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:38,461 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/C, priority=13, startTime=1733325698245; duration=0sec 2024-12-04T15:21:38,461 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:38,461 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:C 2024-12-04T15:21:38,563 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,564 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-04T15:21:38,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:38,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:38,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:38,565 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:21:38,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:38,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:38,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325758581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325758586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,588 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325758585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325758587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,711 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/325431971d9d4ce88ab2f8d5fcd1be25 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/325431971d9d4ce88ab2f8d5fcd1be25 2024-12-04T15:21:38,722 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/A of e2e19d2bb9bfcadbc1f5e0b910706700 into 325431971d9d4ce88ab2f8d5fcd1be25(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:21:38,722 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:38,722 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/A, priority=13, startTime=1733325698228; duration=0sec 2024-12-04T15:21:38,722 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:38,722 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:A 2024-12-04T15:21:38,724 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-04T15:21:38,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:38,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:38,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:38,725 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:21:38,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:38,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:38,750 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/1cdaf297791a4dfea1bc390716fcb43a 2024-12-04T15:21:38,775 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/78bf3dfe597341a38aa47c2e16d5ded8 is 50, key is test_row_0/B:col10/1733325698254/Put/seqid=0 2024-12-04T15:21:38,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741895_1071 (size=12301) 2024-12-04T15:21:38,822 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/78bf3dfe597341a38aa47c2e16d5ded8 2024-12-04T15:21:38,840 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/72f16cf8325c48ba836b037e35213b66 is 50, key is test_row_0/C:col10/1733325698254/Put/seqid=0 2024-12-04T15:21:38,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741896_1072 (size=12301) 2024-12-04T15:21:38,865 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/72f16cf8325c48ba836b037e35213b66 2024-12-04T15:21:38,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/1cdaf297791a4dfea1bc390716fcb43a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/1cdaf297791a4dfea1bc390716fcb43a 2024-12-04T15:21:38,879 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-04T15:21:38,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:38,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:38,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:38,880 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:38,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:21:38,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:38,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/1cdaf297791a4dfea1bc390716fcb43a, entries=150, sequenceid=276, filesize=12.0 K 2024-12-04T15:21:38,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/78bf3dfe597341a38aa47c2e16d5ded8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/78bf3dfe597341a38aa47c2e16d5ded8 2024-12-04T15:21:38,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325758887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325758891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,894 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/78bf3dfe597341a38aa47c2e16d5ded8, entries=150, sequenceid=276, filesize=12.0 K 2024-12-04T15:21:38,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325758891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:38,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325758892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:38,896 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/72f16cf8325c48ba836b037e35213b66 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/72f16cf8325c48ba836b037e35213b66 2024-12-04T15:21:38,905 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/72f16cf8325c48ba836b037e35213b66, entries=150, sequenceid=276, filesize=12.0 K 2024-12-04T15:21:38,906 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for e2e19d2bb9bfcadbc1f5e0b910706700 in 652ms, sequenceid=276, compaction requested=false 2024-12-04T15:21:38,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:38,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-04T15:21:39,033 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:39,034 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-04T15:21:39,035 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:39,035 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-04T15:21:39,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:39,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:39,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:39,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:39,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:39,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:39,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/ac51b1ba14794d3e91b7429e4bd07653 is 50, key is test_row_0/A:col10/1733325698264/Put/seqid=0 2024-12-04T15:21:39,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741897_1073 (size=12301) 2024-12-04T15:21:39,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:39,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:39,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:39,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325759304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:39,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:39,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325759396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:39,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:39,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325759396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:39,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:39,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325759399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:39,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:39,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325759400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:39,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:39,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325759408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:39,475 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/ac51b1ba14794d3e91b7429e4bd07653 2024-12-04T15:21:39,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/d1df564e84c7470a8d81ec3354700cbf is 50, key is test_row_0/B:col10/1733325698264/Put/seqid=0 2024-12-04T15:21:39,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741898_1074 (size=12301) 2024-12-04T15:21:39,535 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/d1df564e84c7470a8d81ec3354700cbf 2024-12-04T15:21:39,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/73d750b3a0a34d50866dc8c4bbc50df8 is 50, key is test_row_0/C:col10/1733325698264/Put/seqid=0 2024-12-04T15:21:39,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741899_1075 (size=12301) 2024-12-04T15:21:39,578 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/73d750b3a0a34d50866dc8c4bbc50df8 2024-12-04T15:21:39,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/ac51b1ba14794d3e91b7429e4bd07653 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ac51b1ba14794d3e91b7429e4bd07653 2024-12-04T15:21:39,602 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ac51b1ba14794d3e91b7429e4bd07653, entries=150, sequenceid=290, filesize=12.0 K 2024-12-04T15:21:39,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/d1df564e84c7470a8d81ec3354700cbf as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/d1df564e84c7470a8d81ec3354700cbf 2024-12-04T15:21:39,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:39,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325759614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:39,618 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/d1df564e84c7470a8d81ec3354700cbf, entries=150, sequenceid=290, filesize=12.0 K 2024-12-04T15:21:39,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/73d750b3a0a34d50866dc8c4bbc50df8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/73d750b3a0a34d50866dc8c4bbc50df8 2024-12-04T15:21:39,645 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/73d750b3a0a34d50866dc8c4bbc50df8, entries=150, sequenceid=290, filesize=12.0 K 2024-12-04T15:21:39,647 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for e2e19d2bb9bfcadbc1f5e0b910706700 in 612ms, sequenceid=290, compaction requested=true 2024-12-04T15:21:39,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:39,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:39,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-04T15:21:39,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-04T15:21:39,659 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-04T15:21:39,659 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8060 sec 2024-12-04T15:21:39,662 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 2.8180 sec 2024-12-04T15:21:39,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:39,922 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-04T15:21:39,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:39,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:39,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:39,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:39,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:39,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:39,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/08b203c7b5a140fc94563743176d55dd is 50, key is test_row_0/A:col10/1733325699257/Put/seqid=0 2024-12-04T15:21:39,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741900_1076 (size=14741) 2024-12-04T15:21:39,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:39,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325759952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:39,957 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/08b203c7b5a140fc94563743176d55dd 2024-12-04T15:21:39,972 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/02d647351e6d4c91ac3a16d4801109ae is 50, key is test_row_0/B:col10/1733325699257/Put/seqid=0 2024-12-04T15:21:39,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741901_1077 (size=12301) 2024-12-04T15:21:39,989 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/02d647351e6d4c91ac3a16d4801109ae 2024-12-04T15:21:40,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/c3bfe16fc9c9460688ffe53230e581c4 is 50, key is test_row_0/C:col10/1733325699257/Put/seqid=0 2024-12-04T15:21:40,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741902_1078 (size=12301) 2024-12-04T15:21:40,036 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/c3bfe16fc9c9460688ffe53230e581c4 2024-12-04T15:21:40,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/08b203c7b5a140fc94563743176d55dd as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/08b203c7b5a140fc94563743176d55dd 2024-12-04T15:21:40,057 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/08b203c7b5a140fc94563743176d55dd, entries=200, sequenceid=316, filesize=14.4 K 2024-12-04T15:21:40,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/02d647351e6d4c91ac3a16d4801109ae as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/02d647351e6d4c91ac3a16d4801109ae 2024-12-04T15:21:40,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:40,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325760058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:40,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/02d647351e6d4c91ac3a16d4801109ae, entries=150, sequenceid=316, filesize=12.0 K 2024-12-04T15:21:40,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/c3bfe16fc9c9460688ffe53230e581c4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/c3bfe16fc9c9460688ffe53230e581c4 2024-12-04T15:21:40,103 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/c3bfe16fc9c9460688ffe53230e581c4, entries=150, sequenceid=316, filesize=12.0 K 2024-12-04T15:21:40,105 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for e2e19d2bb9bfcadbc1f5e0b910706700 in 183ms, sequenceid=316, compaction requested=true 2024-12-04T15:21:40,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:40,105 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:21:40,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:21:40,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:40,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:21:40,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-12-04T15:21:40,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:21:40,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:40,106 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:21:40,110 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52108 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:21:40,110 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/A is initiating minor compaction (all files) 2024-12-04T15:21:40,110 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/A in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:40,111 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/325431971d9d4ce88ab2f8d5fcd1be25, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/1cdaf297791a4dfea1bc390716fcb43a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ac51b1ba14794d3e91b7429e4bd07653, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/08b203c7b5a140fc94563743176d55dd] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=50.9 K 2024-12-04T15:21:40,111 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 325431971d9d4ce88ab2f8d5fcd1be25, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733325696957 2024-12-04T15:21:40,111 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49668 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:21:40,112 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/B is initiating minor compaction (all files) 2024-12-04T15:21:40,112 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/B in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:40,114 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/1aa090ef2c9242068634363cf0453b84, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/78bf3dfe597341a38aa47c2e16d5ded8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/d1df564e84c7470a8d81ec3354700cbf, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/02d647351e6d4c91ac3a16d4801109ae] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=48.5 K 2024-12-04T15:21:40,114 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1aa090ef2c9242068634363cf0453b84, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733325696957 2024-12-04T15:21:40,115 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1cdaf297791a4dfea1bc390716fcb43a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733325698251 2024-12-04T15:21:40,115 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 78bf3dfe597341a38aa47c2e16d5ded8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733325698251 2024-12-04T15:21:40,115 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac51b1ba14794d3e91b7429e4bd07653, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733325698264 2024-12-04T15:21:40,115 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting d1df564e84c7470a8d81ec3354700cbf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733325698264 2024-12-04T15:21:40,116 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08b203c7b5a140fc94563743176d55dd, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733325699220 2024-12-04T15:21:40,116 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 02d647351e6d4c91ac3a16d4801109ae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733325699220 2024-12-04T15:21:40,133 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#A#compaction#64 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:40,134 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/3751a3e688b640edb5daed2927dfa6ae is 50, key is test_row_0/A:col10/1733325699257/Put/seqid=0 2024-12-04T15:21:40,144 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#B#compaction#65 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:40,146 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/c051ea836e554c89868eb4b4e1f46ce9 is 50, key is test_row_0/B:col10/1733325699257/Put/seqid=0 2024-12-04T15:21:40,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741903_1079 (size=13051) 2024-12-04T15:21:40,164 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/3751a3e688b640edb5daed2927dfa6ae as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/3751a3e688b640edb5daed2927dfa6ae 2024-12-04T15:21:40,178 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/A of e2e19d2bb9bfcadbc1f5e0b910706700 into 3751a3e688b640edb5daed2927dfa6ae(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:21:40,178 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:40,178 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/A, priority=12, startTime=1733325700105; duration=0sec 2024-12-04T15:21:40,179 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:40,179 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:A 2024-12-04T15:21:40,179 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:21:40,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741904_1080 (size=13051) 2024-12-04T15:21:40,183 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49600 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:21:40,183 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/C is initiating minor compaction (all files) 2024-12-04T15:21:40,183 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/C in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:40,183 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/8fa094dafe7e4fb9a84c9ea49899dcb9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/72f16cf8325c48ba836b037e35213b66, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/73d750b3a0a34d50866dc8c4bbc50df8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/c3bfe16fc9c9460688ffe53230e581c4] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=48.4 K 2024-12-04T15:21:40,184 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8fa094dafe7e4fb9a84c9ea49899dcb9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733325696957 2024-12-04T15:21:40,187 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72f16cf8325c48ba836b037e35213b66, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733325698251 2024-12-04T15:21:40,189 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73d750b3a0a34d50866dc8c4bbc50df8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733325698264 2024-12-04T15:21:40,190 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3bfe16fc9c9460688ffe53230e581c4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733325699220 2024-12-04T15:21:40,194 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/c051ea836e554c89868eb4b4e1f46ce9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/c051ea836e554c89868eb4b4e1f46ce9 2024-12-04T15:21:40,204 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/B of e2e19d2bb9bfcadbc1f5e0b910706700 into c051ea836e554c89868eb4b4e1f46ce9(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:21:40,204 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:40,204 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/B, priority=12, startTime=1733325700106; duration=0sec 2024-12-04T15:21:40,205 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:40,205 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:B 2024-12-04T15:21:40,211 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#C#compaction#66 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:40,212 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/7828b917b3e9459a99152742bc57ae80 is 50, key is test_row_0/C:col10/1733325699257/Put/seqid=0 2024-12-04T15:21:40,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741905_1081 (size=12983) 2024-12-04T15:21:40,255 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/7828b917b3e9459a99152742bc57ae80 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7828b917b3e9459a99152742bc57ae80 2024-12-04T15:21:40,269 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/C of e2e19d2bb9bfcadbc1f5e0b910706700 into 7828b917b3e9459a99152742bc57ae80(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:21:40,269 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:40,270 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/C, priority=12, startTime=1733325700106; duration=0sec 2024-12-04T15:21:40,270 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:40,270 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:C 2024-12-04T15:21:40,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:40,273 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-04T15:21:40,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:40,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:40,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:40,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:40,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:40,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:40,283 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/b34ee4bdb7e444a086eac0d42e2a2b81 is 50, key is test_row_0/A:col10/1733325699943/Put/seqid=0 2024-12-04T15:21:40,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741906_1082 (size=14741) 2024-12-04T15:21:40,311 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/b34ee4bdb7e444a086eac0d42e2a2b81 2024-12-04T15:21:40,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/050bd1a5a9fb42cca0c41ef38520de91 is 50, key is test_row_0/B:col10/1733325699943/Put/seqid=0 2024-12-04T15:21:40,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741907_1083 (size=12301) 
2024-12-04T15:21:40,350 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/050bd1a5a9fb42cca0c41ef38520de91 2024-12-04T15:21:40,367 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/2cbde14c6f3147b7b22d6dc6895adef6 is 50, key is test_row_0/C:col10/1733325699943/Put/seqid=0 2024-12-04T15:21:40,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:40,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325760366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:40,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741908_1084 (size=12301) 2024-12-04T15:21:40,388 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/2cbde14c6f3147b7b22d6dc6895adef6 2024-12-04T15:21:40,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/b34ee4bdb7e444a086eac0d42e2a2b81 as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b34ee4bdb7e444a086eac0d42e2a2b81 2024-12-04T15:21:40,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:40,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325760405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:40,406 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b34ee4bdb7e444a086eac0d42e2a2b81, entries=200, sequenceid=330, filesize=14.4 K 2024-12-04T15:21:40,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:40,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325760406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:40,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/050bd1a5a9fb42cca0c41ef38520de91 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/050bd1a5a9fb42cca0c41ef38520de91 2024-12-04T15:21:40,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:40,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325760408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:40,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:40,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325760409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:40,418 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/050bd1a5a9fb42cca0c41ef38520de91, entries=150, sequenceid=330, filesize=12.0 K 2024-12-04T15:21:40,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/2cbde14c6f3147b7b22d6dc6895adef6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2cbde14c6f3147b7b22d6dc6895adef6 2024-12-04T15:21:40,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2cbde14c6f3147b7b22d6dc6895adef6, entries=150, sequenceid=330, filesize=12.0 K 2024-12-04T15:21:40,430 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for e2e19d2bb9bfcadbc1f5e0b910706700 in 157ms, sequenceid=330, compaction requested=false 2024-12-04T15:21:40,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:40,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:40,473 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 
e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-04T15:21:40,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:40,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:40,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:40,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:40,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:40,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:40,487 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/667e5bc8c04d4d43a37e1318430975c8 is 50, key is test_row_0/A:col10/1733325700343/Put/seqid=0 2024-12-04T15:21:40,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:40,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325760498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:40,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741909_1085 (size=14741) 2024-12-04T15:21:40,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:40,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325760602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:40,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:40,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325760805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:40,909 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/667e5bc8c04d4d43a37e1318430975c8 2024-12-04T15:21:40,934 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/82477eae13c54293abb8bf8bdfa3effa is 50, key is test_row_0/B:col10/1733325700343/Put/seqid=0 2024-12-04T15:21:40,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741910_1086 (size=12301) 2024-12-04T15:21:40,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-04T15:21:40,955 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-04T15:21:40,957 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:21:40,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-04T15:21:40,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-04T15:21:40,961 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-12-04T15:21:40,962 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:21:40,963 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:21:41,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-04T15:21:41,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:41,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325761110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:41,115 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:41,115 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-04T15:21:41,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:41,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
as already flushing 2024-12-04T15:21:41,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:41,116 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:41,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:41,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:41,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-04T15:21:41,271 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:41,271 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-04T15:21:41,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:41,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:41,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:41,272 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:41,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:41,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:41,345 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/82477eae13c54293abb8bf8bdfa3effa 2024-12-04T15:21:41,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/e7212d5576a441259fdfcdeef258e18b is 50, key is test_row_0/C:col10/1733325700343/Put/seqid=0 2024-12-04T15:21:41,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741911_1087 (size=12301) 2024-12-04T15:21:41,427 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:41,427 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-04T15:21:41,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:41,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
as already flushing 2024-12-04T15:21:41,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:41,428 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:41,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:41,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:41,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-04T15:21:41,580 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:41,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-04T15:21:41,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:41,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:41,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:41,582 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:41,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:41,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:41,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:41,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325761613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:41,735 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:41,736 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-04T15:21:41,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:41,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:41,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:41,736 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:21:41,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:41,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:41,795 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/e7212d5576a441259fdfcdeef258e18b 2024-12-04T15:21:41,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/667e5bc8c04d4d43a37e1318430975c8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/667e5bc8c04d4d43a37e1318430975c8 2024-12-04T15:21:41,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/667e5bc8c04d4d43a37e1318430975c8, entries=200, sequenceid=356, filesize=14.4 K 2024-12-04T15:21:41,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/82477eae13c54293abb8bf8bdfa3effa as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/82477eae13c54293abb8bf8bdfa3effa 2024-12-04T15:21:41,826 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/82477eae13c54293abb8bf8bdfa3effa, entries=150, sequenceid=356, filesize=12.0 K 2024-12-04T15:21:41,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/e7212d5576a441259fdfcdeef258e18b as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e7212d5576a441259fdfcdeef258e18b 2024-12-04T15:21:41,840 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e7212d5576a441259fdfcdeef258e18b, entries=150, sequenceid=356, filesize=12.0 K 2024-12-04T15:21:41,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for e2e19d2bb9bfcadbc1f5e0b910706700 in 1370ms, sequenceid=356, compaction requested=true 2024-12-04T15:21:41,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:41,847 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 
store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:41,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:21:41,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:41,847 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:41,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:21:41,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:41,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:21:41,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:41,850 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:41,850 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/B is initiating minor compaction (all files) 2024-12-04T15:21:41,850 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42533 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:41,850 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/B in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:41,850 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/A is initiating minor compaction (all files) 2024-12-04T15:21:41,850 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/c051ea836e554c89868eb4b4e1f46ce9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/050bd1a5a9fb42cca0c41ef38520de91, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/82477eae13c54293abb8bf8bdfa3effa] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=36.8 K 2024-12-04T15:21:41,850 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/A in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:41,851 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/3751a3e688b640edb5daed2927dfa6ae, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b34ee4bdb7e444a086eac0d42e2a2b81, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/667e5bc8c04d4d43a37e1318430975c8] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=41.5 K 2024-12-04T15:21:41,852 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3751a3e688b640edb5daed2927dfa6ae, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733325699220 2024-12-04T15:21:41,853 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting c051ea836e554c89868eb4b4e1f46ce9, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733325699220 2024-12-04T15:21:41,853 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b34ee4bdb7e444a086eac0d42e2a2b81, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1733325699940 2024-12-04T15:21:41,853 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 050bd1a5a9fb42cca0c41ef38520de91, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1733325699940 2024-12-04T15:21:41,854 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 667e5bc8c04d4d43a37e1318430975c8, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733325700343 2024-12-04T15:21:41,854 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 82477eae13c54293abb8bf8bdfa3effa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733325700343 2024-12-04T15:21:41,873 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#A#compaction#73 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:41,874 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/7d1ca61540d54a6c9d7dfa4319e1cb71 is 50, key is test_row_0/A:col10/1733325700343/Put/seqid=0 2024-12-04T15:21:41,876 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#B#compaction#74 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:41,877 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/1a8bbac878d54e6aa430f6eeba442f30 is 50, key is test_row_0/B:col10/1733325700343/Put/seqid=0 2024-12-04T15:21:41,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741912_1088 (size=13153) 2024-12-04T15:21:41,890 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:41,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-04T15:21:41,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
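The PressureAwareThroughputController(145) entries above report the compaction write rate ("average throughput is 3.28 MB/second, slept 0 time(s) ... total limit is 50.00 MB/second"). The sketch below is a hypothetical illustration of what enforcing such a limit amounts to; the class and method here are invented for the example and are not the controller's actual implementation.

```java
// Hypothetical sketch of a pressure-aware write-rate limit like the
// "total limit is 50.00 MB/second" reported above. Not HBase's
// PressureAwareThroughputController; this class is invented for illustration.
public class ThroughputLimitSketch {
  private final double limitBytesPerSec;
  private long bytesSinceStart = 0;
  private final long startNanos = System.nanoTime();

  ThroughputLimitSketch(double limitBytesPerSec) {
    this.limitBytesPerSec = limitBytesPerSec;
  }

  // After writing a chunk, sleep just long enough that the average rate stays under the limit.
  void control(long bytesJustWritten) throws InterruptedException {
    bytesSinceStart += bytesJustWritten;
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    double minimumSec = bytesSinceStart / limitBytesPerSec;
    if (minimumSec > elapsedSec) {
      Thread.sleep((long) ((minimumSec - elapsedSec) * 1000)); // the "slept N time(s)" in the log
    }
  }

  public static void main(String[] args) throws InterruptedException {
    ThroughputLimitSketch limiter = new ThroughputLimitSketch(50.0 * 1024 * 1024); // 50 MB/s
    limiter.control(13_153); // one ~13 K block, well under the limit, so no sleep needed
  }
}
```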
2024-12-04T15:21:41,891 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-04T15:21:41,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:41,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:41,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:41,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:41,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:41,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:41,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741913_1089 (size=13153) 2024-12-04T15:21:41,900 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/7d1ca61540d54a6c9d7dfa4319e1cb71 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7d1ca61540d54a6c9d7dfa4319e1cb71 2024-12-04T15:21:41,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/fd3d3aa51fb14ea8b8efd3b42b1767a0 is 50, key is test_row_0/A:col10/1733325700489/Put/seqid=0 2024-12-04T15:21:41,918 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/1a8bbac878d54e6aa430f6eeba442f30 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/1a8bbac878d54e6aa430f6eeba442f30 2024-12-04T15:21:41,923 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/A of e2e19d2bb9bfcadbc1f5e0b910706700 into 7d1ca61540d54a6c9d7dfa4319e1cb71(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
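The HRegionFileSystem(442) "Committing .tmp/... as ..." entries above reflect the write-then-rename pattern: both the flusher and the compactor write the complete HFile under the region's .tmp directory and then move it into the column-family directory in a single rename. A minimal sketch of that pattern with the Hadoop FileSystem API follows; the paths are placeholders, not the exact directories from this run.

```java
// Minimal sketch of the "write under .tmp/, then commit by rename" pattern seen in the
// HRegionFileSystem "Committing ... as ..." lines above. Paths are placeholders;
// only the Hadoop FileSystem API calls themselves are real.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitTmpFileSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path tmpFile = new Path("/data/default/TestAcidGuarantees/region-dir/.tmp/A/flushed-hfile");
    Path storeFile = new Path("/data/default/TestAcidGuarantees/region-dir/A/flushed-hfile");

    // The HFile is fully written under .tmp/ first; the commit is a single rename,
    // so readers never observe a partially written store file.
    if (!fs.rename(tmpFile, storeFile)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
    }
  }
}
```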
2024-12-04T15:21:41,923 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:41,923 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/A, priority=13, startTime=1733325701847; duration=0sec 2024-12-04T15:21:41,924 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:41,924 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:A 2024-12-04T15:21:41,925 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:41,927 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:41,927 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/C is initiating minor compaction (all files) 2024-12-04T15:21:41,927 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/C in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:41,927 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7828b917b3e9459a99152742bc57ae80, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2cbde14c6f3147b7b22d6dc6895adef6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e7212d5576a441259fdfcdeef258e18b] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=36.7 K 2024-12-04T15:21:41,929 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7828b917b3e9459a99152742bc57ae80, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733325699220 2024-12-04T15:21:41,929 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/B of e2e19d2bb9bfcadbc1f5e0b910706700 into 1a8bbac878d54e6aa430f6eeba442f30(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
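The "Completed compaction of 3 (all) file(s) ... into 7d1ca61540d54a6c9d7dfa4319e1cb71(size=12.8 K)" entry above is the end state of a minor compaction: three sorted store files rewritten as one. Conceptually that rewrite is a k-way merge of already-sorted key streams; the sketch below shows only that idea with plain Java collections and invented row data, and is not HBase's Compactor code.

```java
// Conceptual illustration of what a minor compaction does with its input files:
// a k-way merge of individually sorted key streams into one output stream.
// Not the HBase Compactor; the types and row data here are invented for the example.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.PriorityQueue;

public class KWayMergeSketch {
  // Merge any number of individually sorted lists into one sorted list.
  public static List<String> merge(List<List<String>> sortedInputs) {
    // Heap entries: {listIndex, positionInList}, ordered by the key they point at.
    PriorityQueue<int[]> heap = new PriorityQueue<>(
        (a, b) -> sortedInputs.get(a[0]).get(a[1]).compareTo(sortedInputs.get(b[0]).get(b[1])));
    for (int i = 0; i < sortedInputs.size(); i++) {
      if (!sortedInputs.get(i).isEmpty()) {
        heap.add(new int[] {i, 0});
      }
    }
    List<String> merged = new ArrayList<>();
    while (!heap.isEmpty()) {
      int[] top = heap.poll();
      merged.add(sortedInputs.get(top[0]).get(top[1]));
      if (top[1] + 1 < sortedInputs.get(top[0]).size()) {
        heap.add(new int[] {top[0], top[1] + 1});
      }
    }
    return merged;
  }

  public static void main(String[] args) {
    // Three "store files", each already sorted by row key.
    List<List<String>> files = Arrays.asList(
        Arrays.asList("test_row_0", "test_row_3"),
        Arrays.asList("test_row_1", "test_row_4"),
        Arrays.asList("test_row_2", "test_row_5"));
    System.out.println(merge(files)); // [test_row_0, test_row_1, ..., test_row_5]
  }
}
```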
2024-12-04T15:21:41,929 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2cbde14c6f3147b7b22d6dc6895adef6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1733325699940 2024-12-04T15:21:41,929 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:41,930 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7212d5576a441259fdfcdeef258e18b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733325700343 2024-12-04T15:21:41,930 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/B, priority=13, startTime=1733325701847; duration=0sec 2024-12-04T15:21:41,931 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:41,931 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:B 2024-12-04T15:21:41,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741914_1090 (size=12301) 2024-12-04T15:21:41,946 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#C#compaction#76 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:41,947 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/0cf340636d43473bb4e647022aac792e is 50, key is test_row_0/C:col10/1733325700343/Put/seqid=0 2024-12-04T15:21:41,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741915_1091 (size=13085) 2024-12-04T15:21:41,982 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/0cf340636d43473bb4e647022aac792e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/0cf340636d43473bb4e647022aac792e 2024-12-04T15:21:41,995 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/C of e2e19d2bb9bfcadbc1f5e0b910706700 into 0cf340636d43473bb4e647022aac792e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:21:41,995 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:41,995 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/C, priority=13, startTime=1733325701848; duration=0sec 2024-12-04T15:21:41,996 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:41,996 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:C 2024-12-04T15:21:42,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-04T15:21:42,334 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/fd3d3aa51fb14ea8b8efd3b42b1767a0 2024-12-04T15:21:42,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/b97bc7ba9bf64ce1bccb923e21214640 is 50, key is test_row_0/B:col10/1733325700489/Put/seqid=0 2024-12-04T15:21:42,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741916_1092 (size=12301) 2024-12-04T15:21:42,373 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/b97bc7ba9bf64ce1bccb923e21214640 2024-12-04T15:21:42,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/6394a36f4a564cd9b9825555a5ddd853 is 50, key is test_row_0/C:col10/1733325700489/Put/seqid=0 2024-12-04T15:21:42,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741917_1093 (size=12301) 2024-12-04T15:21:42,399 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/6394a36f4a564cd9b9825555a5ddd853 2024-12-04T15:21:42,408 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/fd3d3aa51fb14ea8b8efd3b42b1767a0 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/fd3d3aa51fb14ea8b8efd3b42b1767a0 2024-12-04T15:21:42,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:42,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:42,418 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/fd3d3aa51fb14ea8b8efd3b42b1767a0, entries=150, sequenceid=366, filesize=12.0 K 2024-12-04T15:21:42,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/b97bc7ba9bf64ce1bccb923e21214640 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/b97bc7ba9bf64ce1bccb923e21214640 2024-12-04T15:21:42,451 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/b97bc7ba9bf64ce1bccb923e21214640, entries=150, sequenceid=366, filesize=12.0 K 2024-12-04T15:21:42,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/6394a36f4a564cd9b9825555a5ddd853 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/6394a36f4a564cd9b9825555a5ddd853 2024-12-04T15:21:42,462 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/6394a36f4a564cd9b9825555a5ddd853, entries=150, sequenceid=366, filesize=12.0 K 2024-12-04T15:21:42,512 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=80.51 KB/82440 for e2e19d2bb9bfcadbc1f5e0b910706700 in 621ms, sequenceid=366, compaction requested=false 2024-12-04T15:21:42,512 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:42,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:42,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:42,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-04T15:21:42,513 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-04T15:21:42,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-04T15:21:42,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:42,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:42,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:42,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:42,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:42,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:42,519 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-04T15:21:42,520 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5540 sec 2024-12-04T15:21:42,526 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.5630 sec 2024-12-04T15:21:42,527 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/ccd53799ea3d4d718a9de6a461c13e53 is 50, key is test_row_0/A:col10/1733325702472/Put/seqid=0 2024-12-04T15:21:42,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741918_1094 (size=14741) 2024-12-04T15:21:42,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:42,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325762584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:42,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:42,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325762597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:42,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:42,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325762598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:42,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:42,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325762598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:42,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:42,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325762622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:42,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:42,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325762700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:42,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:42,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325762703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:42,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:42,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325762704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:42,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:42,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325762704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:42,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:42,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:42,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325762906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:42,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325762906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:42,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:42,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325762908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:42,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:42,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325762910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:42,943 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/ccd53799ea3d4d718a9de6a461c13e53 2024-12-04T15:21:42,963 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/3f66956edf314661a672e40e546b5fb6 is 50, key is test_row_0/B:col10/1733325702472/Put/seqid=0 2024-12-04T15:21:42,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741919_1095 (size=12301) 2024-12-04T15:21:42,984 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/3f66956edf314661a672e40e546b5fb6 2024-12-04T15:21:43,003 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/164c295e679c49a48f668cc29de052c9 is 50, key is test_row_0/C:col10/1733325702472/Put/seqid=0 2024-12-04T15:21:43,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741920_1096 (size=12301) 2024-12-04T15:21:43,031 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/164c295e679c49a48f668cc29de052c9 2024-12-04T15:21:43,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/ccd53799ea3d4d718a9de6a461c13e53 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ccd53799ea3d4d718a9de6a461c13e53 2024-12-04T15:21:43,055 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ccd53799ea3d4d718a9de6a461c13e53, entries=200, sequenceid=387, filesize=14.4 K 2024-12-04T15:21:43,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/3f66956edf314661a672e40e546b5fb6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/3f66956edf314661a672e40e546b5fb6 2024-12-04T15:21:43,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-04T15:21:43,066 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-04T15:21:43,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/3f66956edf314661a672e40e546b5fb6, entries=150, sequenceid=387, filesize=12.0 K 2024-12-04T15:21:43,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/164c295e679c49a48f668cc29de052c9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/164c295e679c49a48f668cc29de052c9 2024-12-04T15:21:43,071 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:21:43,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-04T15:21:43,074 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:21:43,074 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:21:43,075 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:21:43,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/164c295e679c49a48f668cc29de052c9, entries=150, sequenceid=387, filesize=12.0 K 2024-12-04T15:21:43,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-04T15:21:43,077 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=114.05 KB/116790 for e2e19d2bb9bfcadbc1f5e0b910706700 in 565ms, sequenceid=387, compaction requested=true 2024-12-04T15:21:43,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:43,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:21:43,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:43,078 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:43,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:21:43,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:43,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:21:43,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:21:43,078 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:43,080 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:43,080 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/B is initiating minor compaction (all files) 2024-12-04T15:21:43,080 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/B in 
TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:43,080 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/1a8bbac878d54e6aa430f6eeba442f30, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/b97bc7ba9bf64ce1bccb923e21214640, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/3f66956edf314661a672e40e546b5fb6] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=36.9 K 2024-12-04T15:21:43,081 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:43,081 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/A is initiating minor compaction (all files) 2024-12-04T15:21:43,081 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/A in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:43,081 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a8bbac878d54e6aa430f6eeba442f30, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733325700343 2024-12-04T15:21:43,081 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7d1ca61540d54a6c9d7dfa4319e1cb71, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/fd3d3aa51fb14ea8b8efd3b42b1767a0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ccd53799ea3d4d718a9de6a461c13e53] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=39.3 K 2024-12-04T15:21:43,083 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting b97bc7ba9bf64ce1bccb923e21214640, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733325700477 2024-12-04T15:21:43,083 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d1ca61540d54a6c9d7dfa4319e1cb71, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733325700343 2024-12-04T15:21:43,083 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f66956edf314661a672e40e546b5fb6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733325702459 
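The ExploringCompactionPolicy entries above report selecting 3 store files "after considering 1 permutations with 1 in ratio". The ratio test behind that wording is that every file in a candidate set must be no larger than ratio times the combined size of the other candidates. Below is a minimal, self-contained sketch of that check; the class name, helper, ratio value, and byte counts are illustrative stand-ins (the sizes roughly mirror the B-store candidates of 12.8 K, 12.0 K and 12.0 K), not HBase's actual implementation.

    import java.util.List;

    // Illustrative sketch of the size-ratio check used by ratio-based compaction
    // selection, as hinted at by the ExploringCompactionPolicy log lines above.
    // Names and constants here are examples only, not HBase internals.
    public class RatioCheckSketch {

        // A candidate set passes if every file is <= ratio * (sum of the other files).
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Sizes approximating the three B-store candidates in the log.
            List<Long> candidates = List.of(13_107L, 12_288L, 12_288L);
            double ratio = 1.2; // assumed value; the real ratio comes from configuration
            System.out.println("selected=" + filesInRatio(candidates, ratio));
        }
    }

With three files of nearly equal size the check passes, which matches the log's report that the whole candidate set was selected for a minor compaction.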
2024-12-04T15:21:43,083 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd3d3aa51fb14ea8b8efd3b42b1767a0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733325700477 2024-12-04T15:21:43,085 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ccd53799ea3d4d718a9de6a461c13e53, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733325702459 2024-12-04T15:21:43,104 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#B#compaction#82 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:43,105 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/e3ef428f213144cfbf1c959d3b3ac2a8 is 50, key is test_row_0/B:col10/1733325702472/Put/seqid=0 2024-12-04T15:21:43,105 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#A#compaction#83 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:43,106 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/7e1bbf12a8bc4beb872938ebaa6079ff is 50, key is test_row_0/A:col10/1733325702472/Put/seqid=0 2024-12-04T15:21:43,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741921_1097 (size=13255) 2024-12-04T15:21:43,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741922_1098 (size=13255) 2024-12-04T15:21:43,155 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/e3ef428f213144cfbf1c959d3b3ac2a8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/e3ef428f213144cfbf1c959d3b3ac2a8 2024-12-04T15:21:43,164 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/B of e2e19d2bb9bfcadbc1f5e0b910706700 into e3ef428f213144cfbf1c959d3b3ac2a8(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
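The PressureAwareThroughputController lines above report an average compaction throughput against a total limit of 50.00 MB/second and how long the writer slept to stay under it. A rough sketch of that control-loop idea follows: after each chunk of output, sleep just long enough that bytes divided by elapsed time stays below the limit. This is a simplification with invented names, not the controller's real code.

    // Simplified throughput-throttling sketch in the spirit of the
    // PressureAwareThroughputController entries above. All names and constants
    // are illustrative.
    public class ThrottleSketch {

        private final double limitBytesPerSec;
        private long bytesWritten;
        private final long startNanos = System.nanoTime();

        ThrottleSketch(double limitBytesPerSec) {
            this.limitBytesPerSec = limitBytesPerSec;
        }

        // Record a chunk of output and sleep if we are running ahead of the budget.
        void control(long chunkBytes) throws InterruptedException {
            bytesWritten += chunkBytes;
            double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
            double minElapsedSec = bytesWritten / limitBytesPerSec; // time the budget allows
            long sleepMs = (long) ((minElapsedSec - elapsedSec) * 1000);
            if (sleepMs > 0) {
                Thread.sleep(sleepMs);
            }
        }

        public static void main(String[] args) throws InterruptedException {
            ThrottleSketch throttle = new ThrottleSketch(50 * 1024 * 1024); // ~50 MB/s, as in the log
            for (int i = 0; i < 10; i++) {
                // pretend we wrote a 4 MB block of compacted output
                throttle.control(4 * 1024 * 1024);
            }
            System.out.println("done");
        }
    }

In the log the controller reports "slept 0 time(s)" because the compactions finish well under the 50 MB/s budget.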
2024-12-04T15:21:43,164 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:43,164 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/B, priority=13, startTime=1733325703078; duration=0sec 2024-12-04T15:21:43,164 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:43,164 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:B 2024-12-04T15:21:43,164 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:43,166 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:43,167 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/C is initiating minor compaction (all files) 2024-12-04T15:21:43,167 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/C in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:43,167 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/0cf340636d43473bb4e647022aac792e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/6394a36f4a564cd9b9825555a5ddd853, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/164c295e679c49a48f668cc29de052c9] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=36.8 K 2024-12-04T15:21:43,167 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0cf340636d43473bb4e647022aac792e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733325700343 2024-12-04T15:21:43,168 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6394a36f4a564cd9b9825555a5ddd853, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733325700477 2024-12-04T15:21:43,170 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 164c295e679c49a48f668cc29de052c9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733325702459 2024-12-04T15:21:43,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=24 2024-12-04T15:21:43,191 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#C#compaction#84 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:43,193 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/2b1789977c254fe6af392b4a977d1c15 is 50, key is test_row_0/C:col10/1733325702472/Put/seqid=0 2024-12-04T15:21:43,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741923_1099 (size=13187) 2024-12-04T15:21:43,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:43,219 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-04T15:21:43,225 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/2b1789977c254fe6af392b4a977d1c15 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2b1789977c254fe6af392b4a977d1c15 2024-12-04T15:21:43,228 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:43,228 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-04T15:21:43,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:43,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:43,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:43,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:43,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:43,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
as already flushing 2024-12-04T15:21:43,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:43,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:43,229 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:43,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:43,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:43,237 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/C of e2e19d2bb9bfcadbc1f5e0b910706700 into 2b1789977c254fe6af392b4a977d1c15(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
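Procedures pid=24 and pid=25 above come from a client-requested flush: the master stores a FlushTableProcedure, dispatches a FlushRegionProcedure to the region server, and the server rejects it with "Unable to complete flush ... as already flushing" while MemStoreFlusher.0 is still running, so the master keeps retrying until the flush can proceed. From the client side, that entire exchange sits behind one Admin.flush call. A minimal sketch, assuming an HBase client on the classpath and a reachable cluster, using the table name from this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Minimal sketch of the client side of the flush procedure seen above.
    // The procedure bookkeeping (pid assignment, retries on the region server)
    // all happens on the master side, not in this code.
    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Blocks until the flush procedure completes (or fails).
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

The "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed" line earlier in the log is exactly this call returning once the master reports the procedure done.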
2024-12-04T15:21:43,237 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:43,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/1c10aff18a834ac2bc784baae691c5ca is 50, key is test_row_0/A:col10/1733325703215/Put/seqid=0 2024-12-04T15:21:43,237 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/C, priority=13, startTime=1733325703078; duration=0sec 2024-12-04T15:21:43,238 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:43,238 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:C 2024-12-04T15:21:43,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741924_1100 (size=12301) 2024-12-04T15:21:43,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:43,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:43,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325763250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325763251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:43,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325763261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:43,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325763262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:43,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325763362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:43,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325763362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,369 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:43,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325763367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:43,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325763368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-04T15:21:43,383 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,383 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-04T15:21:43,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:43,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:43,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:43,384 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
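The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region server pushing back on writers while the flush catches up. The stock HBase client retries this for you, but the back-off idea can be made explicit around a single Table.put. The sketch below assumes the same table and the A:col10 column seen in the log keys; the retry count and delays are invented for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch of retrying a put when the region reports it is over its memstore
    // limit, mirroring the RegionTooBusyException entries in the log. The HBase
    // client normally performs this retry internally; this only makes the idea visible.
    public class BusyRegionRetrySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
                long backoffMs = 100;
                for (int attempt = 0; attempt < 5; attempt++) {
                    try {
                        table.put(put);
                        break; // success
                    } catch (RegionTooBusyException busy) {
                        // Region is blocking writes until the memstore drains; back off and retry.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;
                    }
                }
            }
        }
    }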
2024-12-04T15:21:43,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:43,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:43,541 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,542 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-04T15:21:43,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:43,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:43,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:43,543 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:43,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:43,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
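The 512.0 K blocking limit quoted in those RegionTooBusyException entries is the per-region memstore flush size multiplied by the blocking multiplier; the TestAcidGuarantees run evidently uses a very small flush size to force frequent flushes. The snippet below shows the two standard settings involved. The specific values (128 KB times 4) are an assumption chosen only because they reproduce the 512 K figure in the log; they are not the test's verified configuration and certainly not production advice.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // The "Over memstore limit=512.0 K" push-back kicks in when a region's
    // memstore exceeds flush-size * block-multiplier. The values below are
    // assumptions that merely reproduce the figure seen in the log.
    public class MemstoreLimitConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 KB flush size (assumed)
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // 4 * 128 KB = 512 KB blocking limit
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("blocking limit bytes = " + blockingLimit);  // 524288, i.e. 512 K
        }
    }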
2024-12-04T15:21:43,550 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/7e1bbf12a8bc4beb872938ebaa6079ff as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7e1bbf12a8bc4beb872938ebaa6079ff 2024-12-04T15:21:43,560 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/A of e2e19d2bb9bfcadbc1f5e0b910706700 into 7e1bbf12a8bc4beb872938ebaa6079ff(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:21:43,560 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:43,560 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/A, priority=13, startTime=1733325703078; duration=0sec 2024-12-04T15:21:43,560 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:43,560 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:A 2024-12-04T15:21:43,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:43,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325763565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:43,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325763569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:43,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325763572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:43,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325763572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,661 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/1c10aff18a834ac2bc784baae691c5ca 2024-12-04T15:21:43,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-04T15:21:43,682 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/038c510a040d41bf9676f27ce87c0cb3 is 50, key is test_row_0/B:col10/1733325703215/Put/seqid=0 2024-12-04T15:21:43,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741925_1101 (size=12301) 2024-12-04T15:21:43,698 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-04T15:21:43,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:43,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:43,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
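Every flush and compaction in this log writes its output HFile under the region's .tmp directory first and only then logs "Committing ... as ..." to move it into the store directory, so readers never observe a partially written file. A generic sketch of that write-to-temp-then-rename pattern against the Hadoop FileSystem API follows; the paths are invented for illustration, and HBase's own HRegionFileSystem does considerably more bookkeeping than this.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Generic sketch of the ".tmp then commit" pattern visible in the
    // HRegionFileSystem "Committing ... as ..." lines: write the complete file
    // under a temporary directory, then rename it into place.
    public class TmpCommitSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);

            Path tmpFile = new Path("/data/default/ExampleTable/region1/.tmp/A/file1");
            Path committed = new Path("/data/default/ExampleTable/region1/A/file1");

            // 1. Write the complete file under .tmp.
            try (FSDataOutputStream out = fs.create(tmpFile, true)) {
                out.writeBytes("cell data goes here");
            }

            // 2. Move it into the store directory in a single rename.
            fs.mkdirs(committed.getParent());
            if (!fs.rename(tmpFile, committed)) {
                throw new java.io.IOException("commit failed for " + tmpFile);
            }
        }
    }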
2024-12-04T15:21:43,699 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:43,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:43,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:43,706 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/038c510a040d41bf9676f27ce87c0cb3 2024-12-04T15:21:43,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/7b9ea51a879442f4b55e033da9414890 is 50, key is test_row_0/C:col10/1733325703215/Put/seqid=0 2024-12-04T15:21:43,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741926_1102 (size=12301) 2024-12-04T15:21:43,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/7b9ea51a879442f4b55e033da9414890 2024-12-04T15:21:43,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/1c10aff18a834ac2bc784baae691c5ca as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/1c10aff18a834ac2bc784baae691c5ca 2024-12-04T15:21:43,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/1c10aff18a834ac2bc784baae691c5ca, entries=150, sequenceid=411, filesize=12.0 K 2024-12-04T15:21:43,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/038c510a040d41bf9676f27ce87c0cb3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/038c510a040d41bf9676f27ce87c0cb3 2024-12-04T15:21:43,795 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/038c510a040d41bf9676f27ce87c0cb3, entries=150, sequenceid=411, filesize=12.0 K 2024-12-04T15:21:43,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/7b9ea51a879442f4b55e033da9414890 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7b9ea51a879442f4b55e033da9414890 2024-12-04T15:21:43,801 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7b9ea51a879442f4b55e033da9414890, entries=150, sequenceid=411, filesize=12.0 K 2024-12-04T15:21:43,803 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for e2e19d2bb9bfcadbc1f5e0b910706700 in 585ms, sequenceid=411, compaction requested=false 2024-12-04T15:21:43,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:43,852 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-04T15:21:43,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:43,853 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-04T15:21:43,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:43,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:43,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:43,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:43,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:43,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:43,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/f4283cd0348645199bbb154a0f8fda55 is 50, key is test_row_0/A:col10/1733325703244/Put/seqid=0 2024-12-04T15:21:43,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:43,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:43,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741927_1103 (size=12301) 2024-12-04T15:21:43,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:43,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325763907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:43,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325763909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:43,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325763910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:43,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:43,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325763911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325764013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325764015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325764016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325764024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-04T15:21:44,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325764216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325764221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325764218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325764230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,285 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/f4283cd0348645199bbb154a0f8fda55 2024-12-04T15:21:44,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/8c247cd4d69c4691bfd0420ea1ab1a92 is 50, key is test_row_0/B:col10/1733325703244/Put/seqid=0 2024-12-04T15:21:44,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741928_1104 (size=12301) 2024-12-04T15:21:44,332 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/8c247cd4d69c4691bfd0420ea1ab1a92 2024-12-04T15:21:44,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/8dbcea4e817b48afb7feb11339fe2e41 is 50, key is test_row_0/C:col10/1733325703244/Put/seqid=0 2024-12-04T15:21:44,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741929_1105 (size=12301) 2024-12-04T15:21:44,397 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/8dbcea4e817b48afb7feb11339fe2e41 2024-12-04T15:21:44,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/f4283cd0348645199bbb154a0f8fda55 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/f4283cd0348645199bbb154a0f8fda55 2024-12-04T15:21:44,433 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/f4283cd0348645199bbb154a0f8fda55, entries=150, sequenceid=426, filesize=12.0 K 2024-12-04T15:21:44,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/8c247cd4d69c4691bfd0420ea1ab1a92 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/8c247cd4d69c4691bfd0420ea1ab1a92 2024-12-04T15:21:44,464 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/8c247cd4d69c4691bfd0420ea1ab1a92, entries=150, sequenceid=426, filesize=12.0 K 2024-12-04T15:21:44,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/8dbcea4e817b48afb7feb11339fe2e41 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/8dbcea4e817b48afb7feb11339fe2e41 2024-12-04T15:21:44,488 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/8dbcea4e817b48afb7feb11339fe2e41, entries=150, sequenceid=426, filesize=12.0 K 2024-12-04T15:21:44,493 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for e2e19d2bb9bfcadbc1f5e0b910706700 in 640ms, sequenceid=426, compaction requested=true 2024-12-04T15:21:44,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:44,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:44,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-04T15:21:44,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-04T15:21:44,497 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-04T15:21:44,497 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4200 sec 2024-12-04T15:21:44,499 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.4270 sec 2024-12-04T15:21:44,527 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-04T15:21:44,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:44,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:44,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:44,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:44,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:44,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:44,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:44,539 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/736471009c5b47cca87b635eacc25833 is 50, key is test_row_0/A:col10/1733325703909/Put/seqid=0 2024-12-04T15:21:44,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741930_1106 (size=12301) 2024-12-04T15:21:44,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325764587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325764590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325764591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325764592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325764633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,636 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4139 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., hostname=645c2dbfef2e,42169,1733325683856, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:21:44,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325764697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325764700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325764700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325764700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325764899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325764909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325764909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:44,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325764909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:44,951 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/736471009c5b47cca87b635eacc25833 2024-12-04T15:21:44,963 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/48977ed27b614ea78961ce3b3e14e118 is 50, key is test_row_0/B:col10/1733325703909/Put/seqid=0 2024-12-04T15:21:45,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741931_1107 (size=12301) 2024-12-04T15:21:45,008 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/48977ed27b614ea78961ce3b3e14e118 2024-12-04T15:21:45,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/84e134630c80464db36aa44c2cdf70a4 is 50, key is test_row_0/C:col10/1733325703909/Put/seqid=0 2024-12-04T15:21:45,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741932_1108 (size=12301) 2024-12-04T15:21:45,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-04T15:21:45,183 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-04T15:21:45,186 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:21:45,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-12-04T15:21:45,189 INFO [PEWorker-2 {}] 
procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:21:45,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-04T15:21:45,190 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:21:45,190 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:21:45,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:45,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325765209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:45,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:45,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325765212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:45,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:45,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325765216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:45,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:45,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325765221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:45,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-04T15:21:45,343 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:45,344 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-04T15:21:45,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:45,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:45,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:45,344 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:45,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:45,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:45,472 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/84e134630c80464db36aa44c2cdf70a4 2024-12-04T15:21:45,491 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/736471009c5b47cca87b635eacc25833 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/736471009c5b47cca87b635eacc25833 2024-12-04T15:21:45,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-04T15:21:45,498 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:45,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-04T15:21:45,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:45,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
as already flushing 2024-12-04T15:21:45,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:45,499 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:45,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:45,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:45,511 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/736471009c5b47cca87b635eacc25833, entries=150, sequenceid=450, filesize=12.0 K 2024-12-04T15:21:45,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/48977ed27b614ea78961ce3b3e14e118 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/48977ed27b614ea78961ce3b3e14e118 2024-12-04T15:21:45,522 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/48977ed27b614ea78961ce3b3e14e118, entries=150, sequenceid=450, filesize=12.0 K 2024-12-04T15:21:45,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/84e134630c80464db36aa44c2cdf70a4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/84e134630c80464db36aa44c2cdf70a4 2024-12-04T15:21:45,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/84e134630c80464db36aa44c2cdf70a4, entries=150, sequenceid=450, filesize=12.0 K 2024-12-04T15:21:45,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for e2e19d2bb9bfcadbc1f5e0b910706700 in 1009ms, sequenceid=450, compaction requested=true 2024-12-04T15:21:45,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:45,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:21:45,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:45,537 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:21:45,537 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:21:45,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:21:45,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:45,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:21:45,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:45,543 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:21:45,543 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/A is initiating minor compaction (all files) 2024-12-04T15:21:45,543 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/A in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:45,544 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7e1bbf12a8bc4beb872938ebaa6079ff, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/1c10aff18a834ac2bc784baae691c5ca, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/f4283cd0348645199bbb154a0f8fda55, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/736471009c5b47cca87b635eacc25833] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=49.0 K 2024-12-04T15:21:45,544 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:21:45,544 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/B is initiating minor compaction (all files) 2024-12-04T15:21:45,544 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/B in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:45,545 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/e3ef428f213144cfbf1c959d3b3ac2a8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/038c510a040d41bf9676f27ce87c0cb3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/8c247cd4d69c4691bfd0420ea1ab1a92, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/48977ed27b614ea78961ce3b3e14e118] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=49.0 K 2024-12-04T15:21:45,545 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting e3ef428f213144cfbf1c959d3b3ac2a8, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733325702459 2024-12-04T15:21:45,546 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 038c510a040d41bf9676f27ce87c0cb3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733325703212 2024-12-04T15:21:45,546 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c247cd4d69c4691bfd0420ea1ab1a92, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=426, 
earliestPutTs=1733325703244 2024-12-04T15:21:45,547 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 48977ed27b614ea78961ce3b3e14e118, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1733325703909 2024-12-04T15:21:45,547 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e1bbf12a8bc4beb872938ebaa6079ff, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733325702459 2024-12-04T15:21:45,548 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c10aff18a834ac2bc784baae691c5ca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733325703212 2024-12-04T15:21:45,548 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4283cd0348645199bbb154a0f8fda55, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1733325703244 2024-12-04T15:21:45,549 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 736471009c5b47cca87b635eacc25833, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1733325703909 2024-12-04T15:21:45,569 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#B#compaction#94 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:45,570 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/86f98b20d86944ec8b1d54387ba60046 is 50, key is test_row_0/B:col10/1733325703909/Put/seqid=0 2024-12-04T15:21:45,588 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#A#compaction#95 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:45,589 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/5e4fd79f0a1a4fafa75dac34f0672397 is 50, key is test_row_0/A:col10/1733325703909/Put/seqid=0 2024-12-04T15:21:45,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741933_1109 (size=13391) 2024-12-04T15:21:45,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741934_1110 (size=13391) 2024-12-04T15:21:45,628 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/5e4fd79f0a1a4fafa75dac34f0672397 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/5e4fd79f0a1a4fafa75dac34f0672397 2024-12-04T15:21:45,636 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/A of e2e19d2bb9bfcadbc1f5e0b910706700 into 5e4fd79f0a1a4fafa75dac34f0672397(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:21:45,636 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:45,636 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/A, priority=12, startTime=1733325705537; duration=0sec 2024-12-04T15:21:45,636 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:45,636 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:A 2024-12-04T15:21:45,636 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:21:45,640 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:21:45,641 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/C is initiating minor compaction (all files) 2024-12-04T15:21:45,641 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/C in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:45,641 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2b1789977c254fe6af392b4a977d1c15, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7b9ea51a879442f4b55e033da9414890, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/8dbcea4e817b48afb7feb11339fe2e41, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/84e134630c80464db36aa44c2cdf70a4] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=48.9 K 2024-12-04T15:21:45,641 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b1789977c254fe6af392b4a977d1c15, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733325702459 2024-12-04T15:21:45,642 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b9ea51a879442f4b55e033da9414890, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733325703212 2024-12-04T15:21:45,643 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8dbcea4e817b48afb7feb11339fe2e41, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1733325703244 2024-12-04T15:21:45,643 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84e134630c80464db36aa44c2cdf70a4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1733325703909 2024-12-04T15:21:45,653 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:45,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-04T15:21:45,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:45,654 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-04T15:21:45,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:45,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:45,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:45,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:45,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:45,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:45,667 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#C#compaction#96 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:45,668 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/b0a1aa4a81e04921bb55767f20449fb9 is 50, key is test_row_0/C:col10/1733325703909/Put/seqid=0 2024-12-04T15:21:45,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/2b8f8c7c47774d48ba6796d5820b82c4 is 50, key is test_row_0/A:col10/1733325704590/Put/seqid=0 2024-12-04T15:21:45,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741935_1111 (size=13323) 2024-12-04T15:21:45,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741936_1112 (size=12301) 2024-12-04T15:21:45,712 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/b0a1aa4a81e04921bb55767f20449fb9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/b0a1aa4a81e04921bb55767f20449fb9 2024-12-04T15:21:45,722 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/C of e2e19d2bb9bfcadbc1f5e0b910706700 into b0a1aa4a81e04921bb55767f20449fb9(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:21:45,722 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:45,722 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/C, priority=12, startTime=1733325705541; duration=0sec 2024-12-04T15:21:45,723 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:45,723 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:C 2024-12-04T15:21:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:45,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:45,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:45,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325765760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:45,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:45,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325765767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:45,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:45,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325765772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:45,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:45,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325765772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:45,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-04T15:21:45,883 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:45,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325765882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:45,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:45,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325765881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:45,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:45,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325765889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:45,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:45,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325765888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:46,040 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/86f98b20d86944ec8b1d54387ba60046 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/86f98b20d86944ec8b1d54387ba60046 2024-12-04T15:21:46,061 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/B of e2e19d2bb9bfcadbc1f5e0b910706700 into 86f98b20d86944ec8b1d54387ba60046(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:21:46,061 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:46,061 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/B, priority=12, startTime=1733325705537; duration=0sec 2024-12-04T15:21:46,061 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:46,061 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:B 2024-12-04T15:21:46,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:46,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325766085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:46,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:46,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325766086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:46,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:46,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325766096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:46,110 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=463 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/2b8f8c7c47774d48ba6796d5820b82c4 2024-12-04T15:21:46,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:46,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325766111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:46,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/e1271bf4b9c444038408aa65326e1d0d is 50, key is test_row_0/B:col10/1733325704590/Put/seqid=0 2024-12-04T15:21:46,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741937_1113 (size=12301) 2024-12-04T15:21:46,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-04T15:21:46,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:46,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325766393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:46,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:46,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325766393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:46,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:46,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325766401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:46,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:46,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325766420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:46,609 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=463 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/e1271bf4b9c444038408aa65326e1d0d 2024-12-04T15:21:46,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/586407996d644cdcaa2b741c2ae309b8 is 50, key is test_row_0/C:col10/1733325704590/Put/seqid=0 2024-12-04T15:21:46,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741938_1114 (size=12301) 2024-12-04T15:21:46,680 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=463 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/586407996d644cdcaa2b741c2ae309b8 2024-12-04T15:21:46,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/2b8f8c7c47774d48ba6796d5820b82c4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/2b8f8c7c47774d48ba6796d5820b82c4 2024-12-04T15:21:46,693 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/2b8f8c7c47774d48ba6796d5820b82c4, entries=150, sequenceid=463, filesize=12.0 K 2024-12-04T15:21:46,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/e1271bf4b9c444038408aa65326e1d0d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/e1271bf4b9c444038408aa65326e1d0d 2024-12-04T15:21:46,705 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/e1271bf4b9c444038408aa65326e1d0d, entries=150, sequenceid=463, filesize=12.0 K 2024-12-04T15:21:46,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/586407996d644cdcaa2b741c2ae309b8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/586407996d644cdcaa2b741c2ae309b8 2024-12-04T15:21:46,713 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/586407996d644cdcaa2b741c2ae309b8, entries=150, sequenceid=463, filesize=12.0 K 2024-12-04T15:21:46,724 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for e2e19d2bb9bfcadbc1f5e0b910706700 in 1069ms, sequenceid=463, compaction requested=false 2024-12-04T15:21:46,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:46,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:46,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-12-04T15:21:46,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-12-04T15:21:46,760 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-04T15:21:46,760 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5500 sec 2024-12-04T15:21:46,766 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.5790 sec 2024-12-04T15:21:46,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:46,901 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-04T15:21:46,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:46,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:46,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:46,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:46,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:46,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:46,917 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/07eb0784a0d0404d82cd87319c7e377c is 50, key is test_row_0/A:col10/1733325705763/Put/seqid=0 2024-12-04T15:21:46,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:46,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325766936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:46,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:46,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325766941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:46,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:46,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325766941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:46,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:46,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325766944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:46,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741939_1115 (size=12301) 2024-12-04T15:21:46,970 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=490 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/07eb0784a0d0404d82cd87319c7e377c 2024-12-04T15:21:46,998 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/266b5549d537417d9433f2a51e3691ed is 50, key is test_row_0/B:col10/1733325705763/Put/seqid=0 2024-12-04T15:21:47,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741940_1116 (size=12301) 2024-12-04T15:21:47,042 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=490 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/266b5549d537417d9433f2a51e3691ed 2024-12-04T15:21:47,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/34265c5c7dbe4a78b2dc384b482d23be is 50, key is test_row_0/C:col10/1733325705763/Put/seqid=0 2024-12-04T15:21:47,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:47,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325767053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:47,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325767053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:47,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325767053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:47,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325767051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741941_1117 (size=12301) 2024-12-04T15:21:47,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:47,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325767262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:47,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325767263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:47,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325767264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:47,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325767265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-04T15:21:47,296 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-04T15:21:47,298 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:21:47,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-12-04T15:21:47,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-04T15:21:47,309 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:21:47,310 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:21:47,310 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:21:47,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=28 2024-12-04T15:21:47,462 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-04T15:21:47,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:47,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:47,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:47,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:47,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:21:47,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
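The failure reports above for pid=29 come from the master-driven flush procedure colliding with a flush the region server already has in progress: FlushRegionCallable logs "NOT flushing ... as already flushing", reports an IOException back to the master, and the remote procedure is resubmitted (it reappears later in this log). For orientation, a minimal, hedged sketch of how such a table flush is requested from the client side; the table name is the one in the log, the connection setup and class name are illustrative assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to run a flush procedure over every region of the table,
          // like procId 26 and 28 in this log, and waits for it to complete.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
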
2024-12-04T15:21:47,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=490 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/34265c5c7dbe4a78b2dc384b482d23be 2024-12-04T15:21:47,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/07eb0784a0d0404d82cd87319c7e377c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/07eb0784a0d0404d82cd87319c7e377c 2024-12-04T15:21:47,553 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/07eb0784a0d0404d82cd87319c7e377c, entries=150, sequenceid=490, filesize=12.0 K 2024-12-04T15:21:47,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/266b5549d537417d9433f2a51e3691ed as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/266b5549d537417d9433f2a51e3691ed 2024-12-04T15:21:47,567 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:47,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325767566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:47,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325767569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,570 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/266b5549d537417d9433f2a51e3691ed, entries=150, sequenceid=490, filesize=12.0 K 2024-12-04T15:21:47,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/34265c5c7dbe4a78b2dc384b482d23be as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/34265c5c7dbe4a78b2dc384b482d23be 2024-12-04T15:21:47,581 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/34265c5c7dbe4a78b2dc384b482d23be, entries=150, sequenceid=490, filesize=12.0 K 2024-12-04T15:21:47,582 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for e2e19d2bb9bfcadbc1f5e0b910706700 in 681ms, sequenceid=490, compaction requested=true 2024-12-04T15:21:47,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:47,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:21:47,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:47,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:21:47,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:21:47,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:C, priority=-2147483648, current under 
compaction store size is 3 2024-12-04T15:21:47,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-04T15:21:47,583 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:47,584 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:47,585 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:47,585 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/A is initiating minor compaction (all files) 2024-12-04T15:21:47,585 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/A in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:47,585 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/5e4fd79f0a1a4fafa75dac34f0672397, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/2b8f8c7c47774d48ba6796d5820b82c4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/07eb0784a0d0404d82cd87319c7e377c] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=37.1 K 2024-12-04T15:21:47,586 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e4fd79f0a1a4fafa75dac34f0672397, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1733325703909 2024-12-04T15:21:47,587 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b8f8c7c47774d48ba6796d5820b82c4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=463, earliestPutTs=1733325704576 2024-12-04T15:21:47,587 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07eb0784a0d0404d82cd87319c7e377c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1733325705760 2024-12-04T15:21:47,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:47,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-04T15:21:47,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:47,591 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:47,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:47,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:47,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:47,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:47,593 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:47,593 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/C is initiating minor compaction (all files) 2024-12-04T15:21:47,593 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/C in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:47,593 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/b0a1aa4a81e04921bb55767f20449fb9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/586407996d644cdcaa2b741c2ae309b8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/34265c5c7dbe4a78b2dc384b482d23be] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=37.0 K 2024-12-04T15:21:47,595 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting b0a1aa4a81e04921bb55767f20449fb9, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1733325703909 2024-12-04T15:21:47,596 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 586407996d644cdcaa2b741c2ae309b8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=463, earliestPutTs=1733325704576 2024-12-04T15:21:47,597 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 34265c5c7dbe4a78b2dc384b482d23be, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1733325705760 2024-12-04T15:21:47,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/148f7574af7049d4b1d3347fa455a453 is 50, key is test_row_0/A:col10/1733325707585/Put/seqid=0 2024-12-04T15:21:47,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 
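The RegionTooBusyException entries throughout this stretch are the region server rejecting Mutate calls while the region's memstore is above its blocking limit (512 K here, a deliberately small value for this test; the limit is normally the memstore flush size multiplied by the block multiplier). A hedged sketch of what the writing side of such a workload looks like; the row, family, and qualifier names are taken from the log keys, everything else (class name, value) is assumed:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          try {
            // The client retries transient server-side rejections such as
            // RegionTooBusyException before giving up.
            table.put(put);
          } catch (IOException e) {
            // Reached only once the client's retries are exhausted; the usual
            // response is to back off and retry later.
          }
        }
      }
    }

The blocking limit itself is a server-side setting (hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier), not something the writing client can change.
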
2024-12-04T15:21:47,624 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,624 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#A#compaction#104 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:47,625 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/7bd08a4fa9a8469bb1c2c73328d97c0c is 50, key is test_row_0/A:col10/1733325705763/Put/seqid=0 2024-12-04T15:21:47,632 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-04T15:21:47,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:47,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:47,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:47,632 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:47,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:47,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:47,654 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#C#compaction#105 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:47,655 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/2e9d4beb45e045349726ceed9d1a2bd2 is 50, key is test_row_0/C:col10/1733325705763/Put/seqid=0 2024-12-04T15:21:47,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741942_1118 (size=9857) 2024-12-04T15:21:47,687 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=502 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/148f7574af7049d4b1d3347fa455a453 2024-12-04T15:21:47,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/ccde8e307095425691ba49bde19a76d0 is 50, key is test_row_0/B:col10/1733325707585/Put/seqid=0 2024-12-04T15:21:47,703 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:47,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325767696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:47,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325767704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741943_1119 (size=13493) 2024-12-04T15:21:47,745 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/7bd08a4fa9a8469bb1c2c73328d97c0c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7bd08a4fa9a8469bb1c2c73328d97c0c 2024-12-04T15:21:47,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741944_1120 (size=13425) 2024-12-04T15:21:47,769 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/2e9d4beb45e045349726ceed9d1a2bd2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2e9d4beb45e045349726ceed9d1a2bd2 2024-12-04T15:21:47,773 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/A of e2e19d2bb9bfcadbc1f5e0b910706700 into 7bd08a4fa9a8469bb1c2c73328d97c0c(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
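The compaction lines above show the selection logic at work: after the flush each store has three eligible HFiles, the exploring policy picks all of them, and a minor compaction rewrites them into a single file per store. The figures it reports map onto a handful of server-side settings; the sketch below only names those keys with what are, to the best of my knowledge, their stock defaults (this log does not show the test's own values for the first two):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        // Server-side settings, normally placed in hbase-site.xml; shown here only
        // to name the knobs behind the selection messages in the log.
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);      // files needed before a minor compaction
        conf.setInt("hbase.hstore.compaction.max", 10);     // most files rewritten in one compaction
        conf.setInt("hbase.hstore.blockingStoreFiles", 16); // the "16 blocking" figure in the selection lines
        System.out.println("blocking store files = "
            + conf.getInt("hbase.hstore.blockingStoreFiles", -1));
      }
    }
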
2024-12-04T15:21:47,773 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:47,774 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/A, priority=13, startTime=1733325707582; duration=0sec 2024-12-04T15:21:47,774 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:47,774 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:A 2024-12-04T15:21:47,774 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:21:47,777 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/C of e2e19d2bb9bfcadbc1f5e0b910706700 into 2e9d4beb45e045349726ceed9d1a2bd2(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:21:47,777 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:47,777 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/C, priority=13, startTime=1733325707582; duration=0sec 2024-12-04T15:21:47,777 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:47,777 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:C 2024-12-04T15:21:47,778 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:21:47,778 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/B is initiating minor compaction (all files) 2024-12-04T15:21:47,780 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/B in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:47,780 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/86f98b20d86944ec8b1d54387ba60046, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/e1271bf4b9c444038408aa65326e1d0d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/266b5549d537417d9433f2a51e3691ed] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=37.1 K 2024-12-04T15:21:47,780 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86f98b20d86944ec8b1d54387ba60046, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1733325703909 2024-12-04T15:21:47,781 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1271bf4b9c444038408aa65326e1d0d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=463, earliestPutTs=1733325704576 2024-12-04T15:21:47,781 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 266b5549d537417d9433f2a51e3691ed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1733325705760 2024-12-04T15:21:47,793 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-04T15:21:47,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:47,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:47,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:47,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:47,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:47,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:47,810 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#B#compaction#107 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:47,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741945_1121 (size=9857) 2024-12-04T15:21:47,811 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/92c99a1ae6b044298d3d4757d099fed0 is 50, key is test_row_0/B:col10/1733325705763/Put/seqid=0 2024-12-04T15:21:47,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:47,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325767807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:47,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325767808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741946_1122 (size=13493) 2024-12-04T15:21:47,895 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/92c99a1ae6b044298d3d4757d099fed0 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/92c99a1ae6b044298d3d4757d099fed0 2024-12-04T15:21:47,904 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/B of e2e19d2bb9bfcadbc1f5e0b910706700 into 92c99a1ae6b044298d3d4757d099fed0(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:21:47,904 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:47,904 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/B, priority=13, startTime=1733325707582; duration=0sec 2024-12-04T15:21:47,904 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:47,904 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:B 2024-12-04T15:21:47,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-04T15:21:47,951 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:47,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-04T15:21:47,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:47,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:47,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:47,957 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:21:47,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:47,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:48,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:48,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325768014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:48,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:48,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325768020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:48,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:48,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325768070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:48,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:48,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325768076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:48,115 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:48,116 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-04T15:21:48,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:48,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:48,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:48,116 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:48,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:48,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:48,213 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=502 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/ccde8e307095425691ba49bde19a76d0 2024-12-04T15:21:48,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/aaee37cf9fde4b629ef444852fe39aa7 is 50, key is test_row_0/C:col10/1733325707585/Put/seqid=0 2024-12-04T15:21:48,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741947_1123 (size=9857) 2024-12-04T15:21:48,245 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=502 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/aaee37cf9fde4b629ef444852fe39aa7 2024-12-04T15:21:48,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/148f7574af7049d4b1d3347fa455a453 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/148f7574af7049d4b1d3347fa455a453 2024-12-04T15:21:48,265 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/148f7574af7049d4b1d3347fa455a453, entries=100, sequenceid=502, filesize=9.6 K 2024-12-04T15:21:48,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/ccde8e307095425691ba49bde19a76d0 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/ccde8e307095425691ba49bde19a76d0 2024-12-04T15:21:48,270 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:48,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-04T15:21:48,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:48,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:48,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:48,276 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:48,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:21:48,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:48,287 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/ccde8e307095425691ba49bde19a76d0, entries=100, sequenceid=502, filesize=9.6 K 2024-12-04T15:21:48,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/aaee37cf9fde4b629ef444852fe39aa7 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/aaee37cf9fde4b629ef444852fe39aa7 2024-12-04T15:21:48,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:48,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325768326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:48,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:48,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325768332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:48,338 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/aaee37cf9fde4b629ef444852fe39aa7, entries=100, sequenceid=502, filesize=9.6 K 2024-12-04T15:21:48,339 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for e2e19d2bb9bfcadbc1f5e0b910706700 in 748ms, sequenceid=502, compaction requested=false 2024-12-04T15:21:48,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:48,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-04T15:21:48,432 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:48,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-04T15:21:48,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:48,433 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-04T15:21:48,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:48,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:48,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:48,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:48,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:48,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:48,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/a9f6d369632c466b95f586a1259a20e8 is 50, key is test_row_0/A:col10/1733325707692/Put/seqid=0 2024-12-04T15:21:48,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741948_1124 (size=12301) 2024-12-04T15:21:48,493 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=529 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/a9f6d369632c466b95f586a1259a20e8 2024-12-04T15:21:48,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/0899065261d4400292dae0a1b3634ae1 is 50, key is test_row_0/B:col10/1733325707692/Put/seqid=0 2024-12-04T15:21:48,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741949_1125 (size=12301) 2024-12-04T15:21:48,547 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=529 (bloomFilter=true), 
to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/0899065261d4400292dae0a1b3634ae1 2024-12-04T15:21:48,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/7defe4aca756400a8fe6ca89484b5134 is 50, key is test_row_0/C:col10/1733325707692/Put/seqid=0 2024-12-04T15:21:48,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741950_1126 (size=12301) 2024-12-04T15:21:48,629 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=529 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/7defe4aca756400a8fe6ca89484b5134 2024-12-04T15:21:48,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/a9f6d369632c466b95f586a1259a20e8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/a9f6d369632c466b95f586a1259a20e8 2024-12-04T15:21:48,641 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/a9f6d369632c466b95f586a1259a20e8, entries=150, sequenceid=529, filesize=12.0 K 2024-12-04T15:21:48,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/0899065261d4400292dae0a1b3634ae1 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0899065261d4400292dae0a1b3634ae1 2024-12-04T15:21:48,648 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0899065261d4400292dae0a1b3634ae1, entries=150, sequenceid=529, filesize=12.0 K 2024-12-04T15:21:48,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/7defe4aca756400a8fe6ca89484b5134 as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7defe4aca756400a8fe6ca89484b5134 2024-12-04T15:21:48,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:48,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:48,655 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7defe4aca756400a8fe6ca89484b5134, entries=150, sequenceid=529, filesize=12.0 K 2024-12-04T15:21:48,657 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=13.42 KB/13740 for e2e19d2bb9bfcadbc1f5e0b910706700 in 223ms, sequenceid=529, compaction requested=true 2024-12-04T15:21:48,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:48,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:48,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-04T15:21:48,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-12-04T15:21:48,660 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-04T15:21:48,660 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3480 sec 2024-12-04T15:21:48,664 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.3620 sec 2024-12-04T15:21:48,701 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-04T15:21:48,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:48,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:48,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:48,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:48,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, 
store=C 2024-12-04T15:21:48,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:48,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:48,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/117af1f8a7184455af9b0b12c6337aa2 is 50, key is test_row_0/A:col10/1733325708675/Put/seqid=0 2024-12-04T15:21:48,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741951_1127 (size=12301) 2024-12-04T15:21:48,776 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/117af1f8a7184455af9b0b12c6337aa2 2024-12-04T15:21:48,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/409e45ea2ebd47768cf697f330c84816 is 50, key is test_row_0/B:col10/1733325708675/Put/seqid=0 2024-12-04T15:21:48,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741952_1128 (size=12301) 2024-12-04T15:21:48,863 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/409e45ea2ebd47768cf697f330c84816 2024-12-04T15:21:48,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:48,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325768878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:48,886 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/2a5e01c6416a4116b5fbb71d0ef474d7 is 50, key is test_row_0/C:col10/1733325708675/Put/seqid=0 2024-12-04T15:21:48,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:48,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325768880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:48,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:48,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325768884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:48,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741953_1129 (size=12301) 2024-12-04T15:21:48,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:48,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325768988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:48,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:48,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325768991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:49,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325768997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:49,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55734 deadline: 1733325769085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:49,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55748 deadline: 1733325769096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:49,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325769191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:49,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325769194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:49,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325769208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/2a5e01c6416a4116b5fbb71d0ef474d7 2024-12-04T15:21:49,345 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/117af1f8a7184455af9b0b12c6337aa2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/117af1f8a7184455af9b0b12c6337aa2 2024-12-04T15:21:49,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/117af1f8a7184455af9b0b12c6337aa2, entries=150, sequenceid=540, filesize=12.0 K 2024-12-04T15:21:49,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/409e45ea2ebd47768cf697f330c84816 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/409e45ea2ebd47768cf697f330c84816 2024-12-04T15:21:49,377 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/409e45ea2ebd47768cf697f330c84816, entries=150, sequenceid=540, filesize=12.0 K 2024-12-04T15:21:49,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/2a5e01c6416a4116b5fbb71d0ef474d7 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2a5e01c6416a4116b5fbb71d0ef474d7 2024-12-04T15:21:49,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-04T15:21:49,413 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-12-04T15:21:49,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:21:49,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-12-04T15:21:49,418 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:21:49,418 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2a5e01c6416a4116b5fbb71d0ef474d7, entries=150, sequenceid=540, filesize=12.0 K 2024-12-04T15:21:49,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-04T15:21:49,419 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:21:49,419 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:21:49,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for e2e19d2bb9bfcadbc1f5e0b910706700 in 718ms, sequenceid=540, compaction requested=true 2024-12-04T15:21:49,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:49,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:21:49,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:49,419 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:21:49,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
e2e19d2bb9bfcadbc1f5e0b910706700:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:21:49,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:49,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:21:49,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:21:49,420 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:21:49,427 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47952 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:21:49,427 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/A is initiating minor compaction (all files) 2024-12-04T15:21:49,427 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/A in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:49,428 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7bd08a4fa9a8469bb1c2c73328d97c0c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/148f7574af7049d4b1d3347fa455a453, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/a9f6d369632c466b95f586a1259a20e8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/117af1f8a7184455af9b0b12c6337aa2] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=46.8 K 2024-12-04T15:21:49,430 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7bd08a4fa9a8469bb1c2c73328d97c0c, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1733325705760 2024-12-04T15:21:49,430 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47952 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:21:49,430 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/B is initiating minor compaction (all files) 2024-12-04T15:21:49,430 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/B in 
TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:49,430 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/92c99a1ae6b044298d3d4757d099fed0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/ccde8e307095425691ba49bde19a76d0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0899065261d4400292dae0a1b3634ae1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/409e45ea2ebd47768cf697f330c84816] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=46.8 K 2024-12-04T15:21:49,431 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 148f7574af7049d4b1d3347fa455a453, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=502, earliestPutTs=1733325706940 2024-12-04T15:21:49,431 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 92c99a1ae6b044298d3d4757d099fed0, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1733325705760 2024-12-04T15:21:49,432 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting ccde8e307095425691ba49bde19a76d0, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=502, earliestPutTs=1733325706940 2024-12-04T15:21:49,432 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a9f6d369632c466b95f586a1259a20e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1733325707691 2024-12-04T15:21:49,432 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0899065261d4400292dae0a1b3634ae1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1733325707691 2024-12-04T15:21:49,432 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 117af1f8a7184455af9b0b12c6337aa2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=540, earliestPutTs=1733325708675 2024-12-04T15:21:49,432 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 409e45ea2ebd47768cf697f330c84816, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=540, earliestPutTs=1733325708675 2024-12-04T15:21:49,449 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#B#compaction#115 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:49,450 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/2749a3b9eea54aa6ae88c5d58c0f176d is 50, key is test_row_0/B:col10/1733325708675/Put/seqid=0 2024-12-04T15:21:49,455 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#A#compaction#116 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:49,456 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/04dccb22ec324e2a9a657d470d6bd6b2 is 50, key is test_row_0/A:col10/1733325708675/Put/seqid=0 2024-12-04T15:21:49,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741954_1130 (size=13629) 2024-12-04T15:21:49,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741955_1131 (size=13629) 2024-12-04T15:21:49,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:49,505 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-04T15:21:49,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:49,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:49,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:49,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:49,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:49,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:49,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/e8dfbd5836524630848165eddfba05f9 is 50, key is test_row_0/A:col10/1733325709503/Put/seqid=0 2024-12-04T15:21:49,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-04T15:21:49,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741956_1132 (size=14741) 2024-12-04T15:21:49,540 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 
KB at sequenceid=567 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/e8dfbd5836524630848165eddfba05f9 2024-12-04T15:21:49,548 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/86b6dbf0cdcd41aaa66c2d5e9d40667e is 50, key is test_row_0/B:col10/1733325709503/Put/seqid=0 2024-12-04T15:21:49,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741957_1133 (size=12301) 2024-12-04T15:21:49,553 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=567 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/86b6dbf0cdcd41aaa66c2d5e9d40667e 2024-12-04T15:21:49,554 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:49,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325769550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:49,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325769554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:49,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325769555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,563 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/14da791c8de44e22bdaaecc173a0250a is 50, key is test_row_0/C:col10/1733325709503/Put/seqid=0 2024-12-04T15:21:49,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741958_1134 (size=12301) 2024-12-04T15:21:49,570 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-04T15:21:49,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:49,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:49,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:49,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:49,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:49,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:49,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:49,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325769656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:49,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325769659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:49,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325769659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-04T15:21:49,728 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-04T15:21:49,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:49,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:49,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:49,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:21:49,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:49,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:49,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:49,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325769858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:49,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325769862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:49,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325769862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,880 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/2749a3b9eea54aa6ae88c5d58c0f176d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/2749a3b9eea54aa6ae88c5d58c0f176d 2024-12-04T15:21:49,887 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:49,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-04T15:21:49,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:49,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:49,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:49,888 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:49,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:49,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:49,895 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/04dccb22ec324e2a9a657d470d6bd6b2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/04dccb22ec324e2a9a657d470d6bd6b2 2024-12-04T15:21:49,897 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/B of e2e19d2bb9bfcadbc1f5e0b910706700 into 2749a3b9eea54aa6ae88c5d58c0f176d(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:21:49,897 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:49,897 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/B, priority=12, startTime=1733325709419; duration=0sec 2024-12-04T15:21:49,898 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:21:49,898 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:B 2024-12-04T15:21:49,898 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:21:49,914 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47884 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:21:49,914 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e2e19d2bb9bfcadbc1f5e0b910706700/C is initiating minor compaction (all files) 2024-12-04T15:21:49,914 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2e19d2bb9bfcadbc1f5e0b910706700/C in TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:49,914 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2e9d4beb45e045349726ceed9d1a2bd2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/aaee37cf9fde4b629ef444852fe39aa7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7defe4aca756400a8fe6ca89484b5134, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2a5e01c6416a4116b5fbb71d0ef474d7] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp, totalSize=46.8 K 2024-12-04T15:21:49,916 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e9d4beb45e045349726ceed9d1a2bd2, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1733325705760 2024-12-04T15:21:49,917 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting aaee37cf9fde4b629ef444852fe39aa7, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=502, earliestPutTs=1733325706940 2024-12-04T15:21:49,917 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7defe4aca756400a8fe6ca89484b5134, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1733325707691 2024-12-04T15:21:49,918 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a5e01c6416a4116b5fbb71d0ef474d7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=540, earliestPutTs=1733325708675 2024-12-04T15:21:49,921 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/A of e2e19d2bb9bfcadbc1f5e0b910706700 into 04dccb22ec324e2a9a657d470d6bd6b2(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:21:49,921 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:49,921 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/A, priority=12, startTime=1733325709419; duration=0sec 2024-12-04T15:21:49,921 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:49,921 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:A 2024-12-04T15:21:49,928 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2e19d2bb9bfcadbc1f5e0b910706700#C#compaction#120 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:21:49,928 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/9dfcae00f8c445218bec06d35e7ce07e is 50, key is test_row_0/C:col10/1733325708675/Put/seqid=0 2024-12-04T15:21:49,969 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=567 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/14da791c8de44e22bdaaecc173a0250a 2024-12-04T15:21:49,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741959_1135 (size=13561) 2024-12-04T15:21:49,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/e8dfbd5836524630848165eddfba05f9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/e8dfbd5836524630848165eddfba05f9 2024-12-04T15:21:49,993 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/9dfcae00f8c445218bec06d35e7ce07e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/9dfcae00f8c445218bec06d35e7ce07e 2024-12-04T15:21:50,005 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/e8dfbd5836524630848165eddfba05f9, entries=200, sequenceid=567, filesize=14.4 K 2024-12-04T15:21:50,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/86b6dbf0cdcd41aaa66c2d5e9d40667e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/86b6dbf0cdcd41aaa66c2d5e9d40667e 2024-12-04T15:21:50,013 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e2e19d2bb9bfcadbc1f5e0b910706700/C of e2e19d2bb9bfcadbc1f5e0b910706700 into 9dfcae00f8c445218bec06d35e7ce07e(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:21:50,013 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:50,013 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700., storeName=e2e19d2bb9bfcadbc1f5e0b910706700/C, priority=12, startTime=1733325709419; duration=0sec 2024-12-04T15:21:50,013 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:50,013 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:C 2024-12-04T15:21:50,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/86b6dbf0cdcd41aaa66c2d5e9d40667e, entries=150, sequenceid=567, filesize=12.0 K 2024-12-04T15:21:50,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/14da791c8de44e22bdaaecc173a0250a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/14da791c8de44e22bdaaecc173a0250a 2024-12-04T15:21:50,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-04T15:21:50,032 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/14da791c8de44e22bdaaecc173a0250a, entries=150, sequenceid=567, filesize=12.0 K 2024-12-04T15:21:50,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for e2e19d2bb9bfcadbc1f5e0b910706700 in 532ms, sequenceid=567, compaction requested=false 2024-12-04T15:21:50,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:50,044 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:50,048 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-04T15:21:50,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:50,051 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-04T15:21:50,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:50,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:50,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:50,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:50,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:50,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:50,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/fd433071f9844d9cbd6867b30d85235d is 50, key is test_row_0/A:col10/1733325709552/Put/seqid=0 2024-12-04T15:21:50,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741960_1136 (size=12301) 2024-12-04T15:21:50,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:50,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. as already flushing 2024-12-04T15:21:50,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:50,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:50,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325770239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:50,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325770241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:50,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:50,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325770242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:50,344 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x496ef540 to 127.0.0.1:55739 2024-12-04T15:21:50,344 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:21:50,346 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x334e87ba to 127.0.0.1:55739 2024-12-04T15:21:50,346 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:21:50,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:50,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325770347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:50,348 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3738df30 to 127.0.0.1:55739 2024-12-04T15:21:50,348 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:21:50,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:50,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325770348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:50,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:50,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325770349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:50,362 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d88de0e to 127.0.0.1:55739 2024-12-04T15:21:50,362 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:21:50,509 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=579 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/fd433071f9844d9cbd6867b30d85235d 2024-12-04T15:21:50,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/45bde034e7dc4049b35d86ac18d3e4aa is 50, key is test_row_0/B:col10/1733325709552/Put/seqid=0 2024-12-04T15:21:50,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-04T15:21:50,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741961_1137 (size=12301) 2024-12-04T15:21:50,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:50,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325770550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:50,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:50,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:50,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325770551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:50,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325770551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:50,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:50,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55764 deadline: 1733325770853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:50,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:50,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55744 deadline: 1733325770853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:50,854 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:50,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55704 deadline: 1733325770854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:50,931 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=579 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/45bde034e7dc4049b35d86ac18d3e4aa 2024-12-04T15:21:50,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/1863c1a97ac54793b80f1aa274f5bf61 is 50, key is test_row_0/C:col10/1733325709552/Put/seqid=0 2024-12-04T15:21:50,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741962_1138 (size=12301) 2024-12-04T15:21:50,944 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=579 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/1863c1a97ac54793b80f1aa274f5bf61 2024-12-04T15:21:50,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/fd433071f9844d9cbd6867b30d85235d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/fd433071f9844d9cbd6867b30d85235d 2024-12-04T15:21:50,953 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/fd433071f9844d9cbd6867b30d85235d, entries=150, sequenceid=579, filesize=12.0 K 2024-12-04T15:21:50,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/45bde034e7dc4049b35d86ac18d3e4aa as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/45bde034e7dc4049b35d86ac18d3e4aa 2024-12-04T15:21:50,965 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/45bde034e7dc4049b35d86ac18d3e4aa, entries=150, sequenceid=579, filesize=12.0 K 2024-12-04T15:21:50,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/1863c1a97ac54793b80f1aa274f5bf61 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/1863c1a97ac54793b80f1aa274f5bf61 2024-12-04T15:21:50,971 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/1863c1a97ac54793b80f1aa274f5bf61, entries=150, sequenceid=579, filesize=12.0 K 2024-12-04T15:21:50,972 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for e2e19d2bb9bfcadbc1f5e0b910706700 in 921ms, sequenceid=579, compaction requested=true 2024-12-04T15:21:50,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:50,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
2024-12-04T15:21:50,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-12-04T15:21:50,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-12-04T15:21:50,975 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-04T15:21:50,975 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5540 sec 2024-12-04T15:21:50,976 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 1.5590 sec 2024-12-04T15:21:51,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:51,111 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-04T15:21:51,111 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f7de97f to 127.0.0.1:55739 2024-12-04T15:21:51,111 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:21:51,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:51,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:51,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:51,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:51,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:51,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:51,117 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/47657b68f34745648c2b284767c8a558 is 50, key is test_row_0/A:col10/1733325710240/Put/seqid=0 2024-12-04T15:21:51,119 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x731457d6 to 127.0.0.1:55739 2024-12-04T15:21:51,119 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:21:51,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741963_1139 (size=12301) 2024-12-04T15:21:51,357 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1ecd23be to 127.0.0.1:55739 2024-12-04T15:21:51,358 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:21:51,361 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x451dda6c to 127.0.0.1:55739 2024-12-04T15:21:51,361 DEBUG [Thread-157 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:21:51,361 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6584bdfe to 127.0.0.1:55739 2024-12-04T15:21:51,361 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:21:51,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-04T15:21:51,529 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-12-04T15:21:51,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-04T15:21:51,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 100 2024-12-04T15:21:51,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 95 2024-12-04T15:21:51,529 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 77 2024-12-04T15:21:51,530 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75 2024-12-04T15:21:51,530 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 136 2024-12-04T15:21:51,530 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-04T15:21:51,530 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3915 2024-12-04T15:21:51,530 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3933 2024-12-04T15:21:51,530 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-04T15:21:51,530 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1745 2024-12-04T15:21:51,530 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5235 rows 2024-12-04T15:21:51,530 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1769 2024-12-04T15:21:51,530 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5306 rows 2024-12-04T15:21:51,530 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-04T15:21:51,530 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1a212a13 to 127.0.0.1:55739 2024-12-04T15:21:51,530 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:21:51,536 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-04T15:21:51,539 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=607 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/47657b68f34745648c2b284767c8a558 2024-12-04T15:21:51,542 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-04T15:21:51,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-04T15:21:51,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/17b171a3bbed4ff6bfdcbd4043c622cc is 50, key is 
test_row_0/B:col10/1733325710240/Put/seqid=0 2024-12-04T15:21:51,557 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325711557"}]},"ts":"1733325711557"} 2024-12-04T15:21:51,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-04T15:21:51,559 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-04T15:21:51,563 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-04T15:21:51,565 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-04T15:21:51,574 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e2e19d2bb9bfcadbc1f5e0b910706700, UNASSIGN}] 2024-12-04T15:21:51,575 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e2e19d2bb9bfcadbc1f5e0b910706700, UNASSIGN 2024-12-04T15:21:51,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741964_1140 (size=12301) 2024-12-04T15:21:51,576 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=e2e19d2bb9bfcadbc1f5e0b910706700, regionState=CLOSING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:51,576 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=607 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/17b171a3bbed4ff6bfdcbd4043c622cc 2024-12-04T15:21:51,577 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-04T15:21:51,577 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; CloseRegionProcedure e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:21:51,585 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/0c03fd15dcad40b39de737599201607a is 50, key is test_row_0/C:col10/1733325710240/Put/seqid=0 2024-12-04T15:21:51,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741965_1141 (size=12301) 2024-12-04T15:21:51,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-04T15:21:51,734 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:51,736 INFO 
[RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(124): Close e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:51,736 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-04T15:21:51,737 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1681): Closing e2e19d2bb9bfcadbc1f5e0b910706700, disabling compactions & flushes 2024-12-04T15:21:51,737 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:51,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-04T15:21:51,991 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=607 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/0c03fd15dcad40b39de737599201607a 2024-12-04T15:21:51,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/47657b68f34745648c2b284767c8a558 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/47657b68f34745648c2b284767c8a558 2024-12-04T15:21:52,002 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/47657b68f34745648c2b284767c8a558, entries=150, sequenceid=607, filesize=12.0 K 2024-12-04T15:21:52,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/17b171a3bbed4ff6bfdcbd4043c622cc as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/17b171a3bbed4ff6bfdcbd4043c622cc 2024-12-04T15:21:52,008 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/17b171a3bbed4ff6bfdcbd4043c622cc, entries=150, sequenceid=607, filesize=12.0 K 2024-12-04T15:21:52,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/0c03fd15dcad40b39de737599201607a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/0c03fd15dcad40b39de737599201607a 2024-12-04T15:21:52,013 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/0c03fd15dcad40b39de737599201607a, entries=150, sequenceid=607, filesize=12.0 K 2024-12-04T15:21:52,014 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=26.84 KB/27480 for e2e19d2bb9bfcadbc1f5e0b910706700 in 903ms, sequenceid=607, compaction requested=true 2024-12-04T15:21:52,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:52,014 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:52,015 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. because compaction request was cancelled 2024-12-04T15:21:52,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:21:52,015 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:52,015 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:A 2024-12-04T15:21:52,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:52,015 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. after waiting 0 ms 2024-12-04T15:21:52,015 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 2024-12-04T15:21:52,015 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
because compaction request was cancelled 2024-12-04T15:21:52,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:B, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:21:52,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:52,015 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:B 2024-12-04T15:21:52,015 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. because compaction request was cancelled 2024-12-04T15:21:52,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2e19d2bb9bfcadbc1f5e0b910706700:C, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:21:52,015 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2e19d2bb9bfcadbc1f5e0b910706700:C 2024-12-04T15:21:52,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:21:52,015 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(2837): Flushing e2e19d2bb9bfcadbc1f5e0b910706700 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-04T15:21:52,015 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=A 2024-12-04T15:21:52,015 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:52,015 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=B 2024-12-04T15:21:52,015 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:52,015 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2e19d2bb9bfcadbc1f5e0b910706700, store=C 2024-12-04T15:21:52,016 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:52,025 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/0b5d7604b12749fdb23b078aee0dcce9 is 50, key is test_row_0/A:col10/1733325711356/Put/seqid=0 2024-12-04T15:21:52,029 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741966_1142 (size=9857) 2024-12-04T15:21:52,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-04T15:21:52,213 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-04T15:21:52,431 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=614 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/0b5d7604b12749fdb23b078aee0dcce9 2024-12-04T15:21:52,439 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/46bd14d0113a4d3ba70f041e6c4f0073 is 50, key is test_row_0/B:col10/1733325711356/Put/seqid=0 2024-12-04T15:21:52,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741967_1143 (size=9857) 2024-12-04T15:21:52,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-04T15:21:52,846 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=614 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/46bd14d0113a4d3ba70f041e6c4f0073 2024-12-04T15:21:52,857 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/60e7fc49e7f34aef85877a00761ad5fa is 50, key is test_row_0/C:col10/1733325711356/Put/seqid=0 2024-12-04T15:21:52,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741968_1144 (size=9857) 2024-12-04T15:21:52,869 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=614 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/60e7fc49e7f34aef85877a00761ad5fa 2024-12-04T15:21:52,879 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/A/0b5d7604b12749fdb23b078aee0dcce9 as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/0b5d7604b12749fdb23b078aee0dcce9 2024-12-04T15:21:52,888 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/0b5d7604b12749fdb23b078aee0dcce9, entries=100, sequenceid=614, filesize=9.6 K 2024-12-04T15:21:52,889 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/B/46bd14d0113a4d3ba70f041e6c4f0073 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/46bd14d0113a4d3ba70f041e6c4f0073 2024-12-04T15:21:52,895 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/46bd14d0113a4d3ba70f041e6c4f0073, entries=100, sequenceid=614, filesize=9.6 K 2024-12-04T15:21:52,899 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/.tmp/C/60e7fc49e7f34aef85877a00761ad5fa as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/60e7fc49e7f34aef85877a00761ad5fa 2024-12-04T15:21:52,907 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/60e7fc49e7f34aef85877a00761ad5fa, entries=100, sequenceid=614, filesize=9.6 K 2024-12-04T15:21:52,908 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for e2e19d2bb9bfcadbc1f5e0b910706700 in 893ms, sequenceid=614, compaction requested=true 2024-12-04T15:21:52,909 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ef8bb7216b284dbc87758cd419ce5461, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/52242619bf1f40aca9f9ba0d16d91eaa, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b437dbcd80d84e528eef635ef28fbc32, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/6209b51762b4457eb598b5b0578378cc, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b04ae325d5384cabbdc0d1da9e9ef1f6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/4a974183deb04377bef74bac737c34f7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/2ce0c5b8373a45199dfc3d32db4dbd56, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/4183440fcdd54d8fbd75abdce6abb62d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/504d34d2bcff46379fcb0d0f3ce3f0d4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/97eb499b0aeb431b8f53a9bb11d485a9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/a6d3e5aa7d4c4a07b2ec7a3a837b3cae, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ef1314cb18b54f1c8613b4a3c4a3a0b4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7cd52599af43475a81d66798052aaa2a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/3b3e0252e9b4468bb46400e8ecfb5110, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/a8e8bb85900b4d58a10de8d583d5100e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/17f4158d7b5847b3a12f802abdc3fa78, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/66907a41467d4d2092ce2b023129e954, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/325431971d9d4ce88ab2f8d5fcd1be25, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/28d080a2a3d148ba97b17c5d4fa73eef, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/1cdaf297791a4dfea1bc390716fcb43a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ac51b1ba14794d3e91b7429e4bd07653, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/08b203c7b5a140fc94563743176d55dd, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/3751a3e688b640edb5daed2927dfa6ae, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b34ee4bdb7e444a086eac0d42e2a2b81, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/667e5bc8c04d4d43a37e1318430975c8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7d1ca61540d54a6c9d7dfa4319e1cb71, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/fd3d3aa51fb14ea8b8efd3b42b1767a0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ccd53799ea3d4d718a9de6a461c13e53, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7e1bbf12a8bc4beb872938ebaa6079ff, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/1c10aff18a834ac2bc784baae691c5ca, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/f4283cd0348645199bbb154a0f8fda55, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/5e4fd79f0a1a4fafa75dac34f0672397, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/736471009c5b47cca87b635eacc25833, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/2b8f8c7c47774d48ba6796d5820b82c4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7bd08a4fa9a8469bb1c2c73328d97c0c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/07eb0784a0d0404d82cd87319c7e377c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/148f7574af7049d4b1d3347fa455a453, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/a9f6d369632c466b95f586a1259a20e8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/117af1f8a7184455af9b0b12c6337aa2] to archive 2024-12-04T15:21:52,913 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-04T15:21:52,924 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ef8bb7216b284dbc87758cd419ce5461 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ef8bb7216b284dbc87758cd419ce5461 2024-12-04T15:21:52,926 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/52242619bf1f40aca9f9ba0d16d91eaa to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/52242619bf1f40aca9f9ba0d16d91eaa 2024-12-04T15:21:52,928 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b437dbcd80d84e528eef635ef28fbc32 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b437dbcd80d84e528eef635ef28fbc32 2024-12-04T15:21:52,930 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/6209b51762b4457eb598b5b0578378cc to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/6209b51762b4457eb598b5b0578378cc 2024-12-04T15:21:52,931 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b04ae325d5384cabbdc0d1da9e9ef1f6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b04ae325d5384cabbdc0d1da9e9ef1f6 2024-12-04T15:21:52,933 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/4a974183deb04377bef74bac737c34f7 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/4a974183deb04377bef74bac737c34f7 2024-12-04T15:21:52,934 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/2ce0c5b8373a45199dfc3d32db4dbd56 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/2ce0c5b8373a45199dfc3d32db4dbd56 2024-12-04T15:21:52,936 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/4183440fcdd54d8fbd75abdce6abb62d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/4183440fcdd54d8fbd75abdce6abb62d 2024-12-04T15:21:52,937 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/504d34d2bcff46379fcb0d0f3ce3f0d4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/504d34d2bcff46379fcb0d0f3ce3f0d4 2024-12-04T15:21:52,939 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/97eb499b0aeb431b8f53a9bb11d485a9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/97eb499b0aeb431b8f53a9bb11d485a9 2024-12-04T15:21:52,940 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/a6d3e5aa7d4c4a07b2ec7a3a837b3cae to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/a6d3e5aa7d4c4a07b2ec7a3a837b3cae 2024-12-04T15:21:52,941 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ef1314cb18b54f1c8613b4a3c4a3a0b4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ef1314cb18b54f1c8613b4a3c4a3a0b4 2024-12-04T15:21:52,942 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7cd52599af43475a81d66798052aaa2a to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7cd52599af43475a81d66798052aaa2a 2024-12-04T15:21:52,944 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/3b3e0252e9b4468bb46400e8ecfb5110 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/3b3e0252e9b4468bb46400e8ecfb5110 2024-12-04T15:21:52,945 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/a8e8bb85900b4d58a10de8d583d5100e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/a8e8bb85900b4d58a10de8d583d5100e 2024-12-04T15:21:52,947 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/17f4158d7b5847b3a12f802abdc3fa78 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/17f4158d7b5847b3a12f802abdc3fa78 2024-12-04T15:21:52,949 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/66907a41467d4d2092ce2b023129e954 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/66907a41467d4d2092ce2b023129e954 2024-12-04T15:21:52,950 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/325431971d9d4ce88ab2f8d5fcd1be25 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/325431971d9d4ce88ab2f8d5fcd1be25 2024-12-04T15:21:52,951 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/28d080a2a3d148ba97b17c5d4fa73eef to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/28d080a2a3d148ba97b17c5d4fa73eef 2024-12-04T15:21:52,953 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/1cdaf297791a4dfea1bc390716fcb43a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/1cdaf297791a4dfea1bc390716fcb43a 2024-12-04T15:21:52,954 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ac51b1ba14794d3e91b7429e4bd07653 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ac51b1ba14794d3e91b7429e4bd07653 2024-12-04T15:21:52,956 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/08b203c7b5a140fc94563743176d55dd to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/08b203c7b5a140fc94563743176d55dd 2024-12-04T15:21:52,960 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/3751a3e688b640edb5daed2927dfa6ae to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/3751a3e688b640edb5daed2927dfa6ae 2024-12-04T15:21:52,961 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b34ee4bdb7e444a086eac0d42e2a2b81 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/b34ee4bdb7e444a086eac0d42e2a2b81 2024-12-04T15:21:52,963 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/667e5bc8c04d4d43a37e1318430975c8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/667e5bc8c04d4d43a37e1318430975c8 2024-12-04T15:21:52,964 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7d1ca61540d54a6c9d7dfa4319e1cb71 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7d1ca61540d54a6c9d7dfa4319e1cb71 2024-12-04T15:21:52,965 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/fd3d3aa51fb14ea8b8efd3b42b1767a0 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/fd3d3aa51fb14ea8b8efd3b42b1767a0 2024-12-04T15:21:52,967 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ccd53799ea3d4d718a9de6a461c13e53 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/ccd53799ea3d4d718a9de6a461c13e53 2024-12-04T15:21:52,968 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7e1bbf12a8bc4beb872938ebaa6079ff to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7e1bbf12a8bc4beb872938ebaa6079ff 2024-12-04T15:21:52,969 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/1c10aff18a834ac2bc784baae691c5ca to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/1c10aff18a834ac2bc784baae691c5ca 2024-12-04T15:21:52,972 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/f4283cd0348645199bbb154a0f8fda55 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/f4283cd0348645199bbb154a0f8fda55 2024-12-04T15:21:52,980 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/5e4fd79f0a1a4fafa75dac34f0672397 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/5e4fd79f0a1a4fafa75dac34f0672397 2024-12-04T15:21:52,994 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/736471009c5b47cca87b635eacc25833 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/736471009c5b47cca87b635eacc25833 2024-12-04T15:21:53,001 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/2b8f8c7c47774d48ba6796d5820b82c4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/2b8f8c7c47774d48ba6796d5820b82c4 2024-12-04T15:21:53,005 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7bd08a4fa9a8469bb1c2c73328d97c0c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7bd08a4fa9a8469bb1c2c73328d97c0c 2024-12-04T15:21:53,007 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/07eb0784a0d0404d82cd87319c7e377c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/07eb0784a0d0404d82cd87319c7e377c 2024-12-04T15:21:53,009 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/148f7574af7049d4b1d3347fa455a453 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/148f7574af7049d4b1d3347fa455a453 2024-12-04T15:21:53,011 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/a9f6d369632c466b95f586a1259a20e8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/a9f6d369632c466b95f586a1259a20e8 2024-12-04T15:21:53,012 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/117af1f8a7184455af9b0b12c6337aa2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/117af1f8a7184455af9b0b12c6337aa2 2024-12-04T15:21:53,032 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/30e4f0c9c3094328b4dfd718d8feaa57, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0001fdca5f1649d9815a141c13dd224f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/08132e240abe4ef0b171abcf320cca72, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/316c156b65414e1faced5ef02c14bafc, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/f64f520bdb854bdfa825e935b4fa97cc, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/92ae75732eca410494b462b58069491d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/731b79f5c3814400bf0474eb55fed6f9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/a237298a1bc94fc8aeb82efdd68a1ebd, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/05c495ef7b484f1eb4846729a0a323be, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/32b77f5667ea434499164eafbc687ed5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/f4cba283fd3d4fc8b9f53adcbc5c4a71, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/5437f851b986446792632077a7426374, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/78133090d2bf42a6ba9cf7d1678a8869, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/563315c6ea964bee87cc7cc720893bac, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/053e08fbc64444f19b1864409af89de9, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0aa96b9b63e34dbe942857ff00c43072, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/20a55b6a91324218b9f680942d4c44bb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/1aa090ef2c9242068634363cf0453b84, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/ca66bcbd425e45b38b0d5a705ca168c4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/78bf3dfe597341a38aa47c2e16d5ded8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/d1df564e84c7470a8d81ec3354700cbf, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/c051ea836e554c89868eb4b4e1f46ce9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/02d647351e6d4c91ac3a16d4801109ae, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/050bd1a5a9fb42cca0c41ef38520de91, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/1a8bbac878d54e6aa430f6eeba442f30, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/82477eae13c54293abb8bf8bdfa3effa, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/b97bc7ba9bf64ce1bccb923e21214640, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/e3ef428f213144cfbf1c959d3b3ac2a8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/3f66956edf314661a672e40e546b5fb6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/038c510a040d41bf9676f27ce87c0cb3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/8c247cd4d69c4691bfd0420ea1ab1a92, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/86f98b20d86944ec8b1d54387ba60046, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/48977ed27b614ea78961ce3b3e14e118, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/e1271bf4b9c444038408aa65326e1d0d, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/92c99a1ae6b044298d3d4757d099fed0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/266b5549d537417d9433f2a51e3691ed, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/ccde8e307095425691ba49bde19a76d0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0899065261d4400292dae0a1b3634ae1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/409e45ea2ebd47768cf697f330c84816] to archive 2024-12-04T15:21:53,034 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-04T15:21:53,037 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/30e4f0c9c3094328b4dfd718d8feaa57 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/30e4f0c9c3094328b4dfd718d8feaa57 2024-12-04T15:21:53,039 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0001fdca5f1649d9815a141c13dd224f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0001fdca5f1649d9815a141c13dd224f 2024-12-04T15:21:53,044 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/08132e240abe4ef0b171abcf320cca72 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/08132e240abe4ef0b171abcf320cca72 2024-12-04T15:21:53,047 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/316c156b65414e1faced5ef02c14bafc to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/316c156b65414e1faced5ef02c14bafc 2024-12-04T15:21:53,053 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/f64f520bdb854bdfa825e935b4fa97cc to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/f64f520bdb854bdfa825e935b4fa97cc 2024-12-04T15:21:53,054 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/92ae75732eca410494b462b58069491d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/92ae75732eca410494b462b58069491d 2024-12-04T15:21:53,056 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/731b79f5c3814400bf0474eb55fed6f9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/731b79f5c3814400bf0474eb55fed6f9 2024-12-04T15:21:53,057 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/a237298a1bc94fc8aeb82efdd68a1ebd to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/a237298a1bc94fc8aeb82efdd68a1ebd 2024-12-04T15:21:53,059 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/05c495ef7b484f1eb4846729a0a323be to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/05c495ef7b484f1eb4846729a0a323be 2024-12-04T15:21:53,061 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/32b77f5667ea434499164eafbc687ed5 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/32b77f5667ea434499164eafbc687ed5 2024-12-04T15:21:53,062 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/f4cba283fd3d4fc8b9f53adcbc5c4a71 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/f4cba283fd3d4fc8b9f53adcbc5c4a71 2024-12-04T15:21:53,064 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/5437f851b986446792632077a7426374 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/5437f851b986446792632077a7426374 2024-12-04T15:21:53,067 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/78133090d2bf42a6ba9cf7d1678a8869 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/78133090d2bf42a6ba9cf7d1678a8869 2024-12-04T15:21:53,069 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/563315c6ea964bee87cc7cc720893bac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/563315c6ea964bee87cc7cc720893bac 2024-12-04T15:21:53,070 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/053e08fbc64444f19b1864409af89de9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/053e08fbc64444f19b1864409af89de9 2024-12-04T15:21:53,072 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0aa96b9b63e34dbe942857ff00c43072 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0aa96b9b63e34dbe942857ff00c43072 2024-12-04T15:21:53,073 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/20a55b6a91324218b9f680942d4c44bb to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/20a55b6a91324218b9f680942d4c44bb 2024-12-04T15:21:53,074 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/1aa090ef2c9242068634363cf0453b84 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/1aa090ef2c9242068634363cf0453b84 2024-12-04T15:21:53,075 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/ca66bcbd425e45b38b0d5a705ca168c4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/ca66bcbd425e45b38b0d5a705ca168c4 2024-12-04T15:21:53,077 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/78bf3dfe597341a38aa47c2e16d5ded8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/78bf3dfe597341a38aa47c2e16d5ded8 2024-12-04T15:21:53,078 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/d1df564e84c7470a8d81ec3354700cbf to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/d1df564e84c7470a8d81ec3354700cbf 2024-12-04T15:21:53,079 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/c051ea836e554c89868eb4b4e1f46ce9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/c051ea836e554c89868eb4b4e1f46ce9 2024-12-04T15:21:53,081 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/02d647351e6d4c91ac3a16d4801109ae to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/02d647351e6d4c91ac3a16d4801109ae 2024-12-04T15:21:53,082 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/050bd1a5a9fb42cca0c41ef38520de91 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/050bd1a5a9fb42cca0c41ef38520de91 2024-12-04T15:21:53,083 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/1a8bbac878d54e6aa430f6eeba442f30 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/1a8bbac878d54e6aa430f6eeba442f30 2024-12-04T15:21:53,084 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/82477eae13c54293abb8bf8bdfa3effa to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/82477eae13c54293abb8bf8bdfa3effa 2024-12-04T15:21:53,085 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/b97bc7ba9bf64ce1bccb923e21214640 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/b97bc7ba9bf64ce1bccb923e21214640 2024-12-04T15:21:53,087 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/e3ef428f213144cfbf1c959d3b3ac2a8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/e3ef428f213144cfbf1c959d3b3ac2a8 2024-12-04T15:21:53,088 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/3f66956edf314661a672e40e546b5fb6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/3f66956edf314661a672e40e546b5fb6 2024-12-04T15:21:53,089 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/038c510a040d41bf9676f27ce87c0cb3 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/038c510a040d41bf9676f27ce87c0cb3 2024-12-04T15:21:53,090 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/8c247cd4d69c4691bfd0420ea1ab1a92 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/8c247cd4d69c4691bfd0420ea1ab1a92 2024-12-04T15:21:53,092 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/86f98b20d86944ec8b1d54387ba60046 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/86f98b20d86944ec8b1d54387ba60046 2024-12-04T15:21:53,093 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/48977ed27b614ea78961ce3b3e14e118 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/48977ed27b614ea78961ce3b3e14e118 2024-12-04T15:21:53,095 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/e1271bf4b9c444038408aa65326e1d0d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/e1271bf4b9c444038408aa65326e1d0d 2024-12-04T15:21:53,097 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/92c99a1ae6b044298d3d4757d099fed0 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/92c99a1ae6b044298d3d4757d099fed0 2024-12-04T15:21:53,098 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/266b5549d537417d9433f2a51e3691ed to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/266b5549d537417d9433f2a51e3691ed 2024-12-04T15:21:53,099 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/ccde8e307095425691ba49bde19a76d0 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/ccde8e307095425691ba49bde19a76d0 2024-12-04T15:21:53,100 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0899065261d4400292dae0a1b3634ae1 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/0899065261d4400292dae0a1b3634ae1 2024-12-04T15:21:53,102 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/409e45ea2ebd47768cf697f330c84816 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/409e45ea2ebd47768cf697f330c84816 2024-12-04T15:21:53,109 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/1c120041837043a98c3668e9605a8152, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7d8897002c134c9c956145be7be2f041, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/f847537f0bd84866ac1ed21ffbe55e54, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/f3246de1084f4664ac24feb4aa4e2597, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e01f910056304890995b04d8d0120e2a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/3146490b8eb0430caeeecd2ad3793874, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/5bde765ba4ff412f9f92ff9758dd746e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/430d9cc676034c99b8818f7cb767ee91, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/8dfce65007004fca95890779eb8907b9, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/b5ef8b9009f04fc8ab9c0084af61eb87, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/b90573bbea6740ab86e1a878e2998e5d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/35e15ac0294f40d0bf84e29927740fe0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/f2ba857de0484fb5899943c3fe2f43a8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e347c1535aff466a8581c38899b6f6b8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/057aa998dce64968b40be5e58cc7d920, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/8fa094dafe7e4fb9a84c9ea49899dcb9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/02345b96ce504456912bc08d3a706c02, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/72f16cf8325c48ba836b037e35213b66, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/73d750b3a0a34d50866dc8c4bbc50df8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7828b917b3e9459a99152742bc57ae80, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/c3bfe16fc9c9460688ffe53230e581c4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2cbde14c6f3147b7b22d6dc6895adef6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/0cf340636d43473bb4e647022aac792e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e7212d5576a441259fdfcdeef258e18b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/6394a36f4a564cd9b9825555a5ddd853, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2b1789977c254fe6af392b4a977d1c15, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/164c295e679c49a48f668cc29de052c9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7b9ea51a879442f4b55e033da9414890, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/8dbcea4e817b48afb7feb11339fe2e41, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/b0a1aa4a81e04921bb55767f20449fb9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/84e134630c80464db36aa44c2cdf70a4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/586407996d644cdcaa2b741c2ae309b8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2e9d4beb45e045349726ceed9d1a2bd2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/34265c5c7dbe4a78b2dc384b482d23be, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/aaee37cf9fde4b629ef444852fe39aa7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7defe4aca756400a8fe6ca89484b5134, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2a5e01c6416a4116b5fbb71d0ef474d7] to archive 2024-12-04T15:21:53,115 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-04T15:21:53,118 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/1c120041837043a98c3668e9605a8152 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/1c120041837043a98c3668e9605a8152 2024-12-04T15:21:53,120 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7d8897002c134c9c956145be7be2f041 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7d8897002c134c9c956145be7be2f041 2024-12-04T15:21:53,121 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/f847537f0bd84866ac1ed21ffbe55e54 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/f847537f0bd84866ac1ed21ffbe55e54 2024-12-04T15:21:53,123 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/f3246de1084f4664ac24feb4aa4e2597 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/f3246de1084f4664ac24feb4aa4e2597 2024-12-04T15:21:53,125 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e01f910056304890995b04d8d0120e2a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e01f910056304890995b04d8d0120e2a 2024-12-04T15:21:53,126 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/3146490b8eb0430caeeecd2ad3793874 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/3146490b8eb0430caeeecd2ad3793874 2024-12-04T15:21:53,128 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/5bde765ba4ff412f9f92ff9758dd746e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/5bde765ba4ff412f9f92ff9758dd746e 2024-12-04T15:21:53,148 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/430d9cc676034c99b8818f7cb767ee91 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/430d9cc676034c99b8818f7cb767ee91 2024-12-04T15:21:53,156 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/8dfce65007004fca95890779eb8907b9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/8dfce65007004fca95890779eb8907b9 2024-12-04T15:21:53,172 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/b5ef8b9009f04fc8ab9c0084af61eb87 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/b5ef8b9009f04fc8ab9c0084af61eb87 2024-12-04T15:21:53,174 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/b90573bbea6740ab86e1a878e2998e5d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/b90573bbea6740ab86e1a878e2998e5d 2024-12-04T15:21:53,188 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/35e15ac0294f40d0bf84e29927740fe0 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/35e15ac0294f40d0bf84e29927740fe0 2024-12-04T15:21:53,194 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/f2ba857de0484fb5899943c3fe2f43a8 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/f2ba857de0484fb5899943c3fe2f43a8 2024-12-04T15:21:53,197 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e347c1535aff466a8581c38899b6f6b8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e347c1535aff466a8581c38899b6f6b8 2024-12-04T15:21:53,199 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/057aa998dce64968b40be5e58cc7d920 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/057aa998dce64968b40be5e58cc7d920 2024-12-04T15:21:53,200 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/8fa094dafe7e4fb9a84c9ea49899dcb9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/8fa094dafe7e4fb9a84c9ea49899dcb9 2024-12-04T15:21:53,202 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/02345b96ce504456912bc08d3a706c02 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/02345b96ce504456912bc08d3a706c02 2024-12-04T15:21:53,205 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/72f16cf8325c48ba836b037e35213b66 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/72f16cf8325c48ba836b037e35213b66 2024-12-04T15:21:53,208 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/73d750b3a0a34d50866dc8c4bbc50df8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/73d750b3a0a34d50866dc8c4bbc50df8 2024-12-04T15:21:53,210 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7828b917b3e9459a99152742bc57ae80 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7828b917b3e9459a99152742bc57ae80 2024-12-04T15:21:53,212 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/c3bfe16fc9c9460688ffe53230e581c4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/c3bfe16fc9c9460688ffe53230e581c4 2024-12-04T15:21:53,236 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2cbde14c6f3147b7b22d6dc6895adef6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2cbde14c6f3147b7b22d6dc6895adef6 2024-12-04T15:21:53,252 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/0cf340636d43473bb4e647022aac792e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/0cf340636d43473bb4e647022aac792e 2024-12-04T15:21:53,260 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e7212d5576a441259fdfcdeef258e18b to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/e7212d5576a441259fdfcdeef258e18b 2024-12-04T15:21:53,261 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/6394a36f4a564cd9b9825555a5ddd853 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/6394a36f4a564cd9b9825555a5ddd853 2024-12-04T15:21:53,263 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2b1789977c254fe6af392b4a977d1c15 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2b1789977c254fe6af392b4a977d1c15 2024-12-04T15:21:53,264 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/164c295e679c49a48f668cc29de052c9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/164c295e679c49a48f668cc29de052c9 2024-12-04T15:21:53,265 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7b9ea51a879442f4b55e033da9414890 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7b9ea51a879442f4b55e033da9414890 2024-12-04T15:21:53,266 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/8dbcea4e817b48afb7feb11339fe2e41 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/8dbcea4e817b48afb7feb11339fe2e41 2024-12-04T15:21:53,267 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/b0a1aa4a81e04921bb55767f20449fb9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/b0a1aa4a81e04921bb55767f20449fb9 2024-12-04T15:21:53,269 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/84e134630c80464db36aa44c2cdf70a4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/84e134630c80464db36aa44c2cdf70a4 2024-12-04T15:21:53,270 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/586407996d644cdcaa2b741c2ae309b8 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/586407996d644cdcaa2b741c2ae309b8 2024-12-04T15:21:53,271 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2e9d4beb45e045349726ceed9d1a2bd2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2e9d4beb45e045349726ceed9d1a2bd2 2024-12-04T15:21:53,273 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/34265c5c7dbe4a78b2dc384b482d23be to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/34265c5c7dbe4a78b2dc384b482d23be 2024-12-04T15:21:53,274 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/aaee37cf9fde4b629ef444852fe39aa7 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/aaee37cf9fde4b629ef444852fe39aa7 2024-12-04T15:21:53,275 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7defe4aca756400a8fe6ca89484b5134 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/7defe4aca756400a8fe6ca89484b5134 2024-12-04T15:21:53,276 DEBUG [StoreCloser-TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2a5e01c6416a4116b5fbb71d0ef474d7 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/2a5e01c6416a4116b5fbb71d0ef474d7 2024-12-04T15:21:53,282 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/recovered.edits/617.seqid, newMaxSeqId=617, maxSeqId=1 2024-12-04T15:21:53,285 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700. 
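(The store-close archive entries above all apply one path rewrite: an HFile under <root>/data/default/TestAcidGuarantees/<region>/<family>/<file> is moved to the identical relative path under <root>/archive/. A minimal Java sketch of that mapping is given below, assuming invented names (ArchivePathSketch, toArchivePath) purely for illustration; it is not the HBase HFileArchiver implementation.)

public class ArchivePathSketch {
    // Rewrites a store-file path under <root>/data/... to the corresponding
    // <root>/archive/data/... path, mirroring the moves logged above.
    // Hypothetical helper for illustration only, not HBase code.
    static String toArchivePath(String rootDir, String storeFilePath) {
        if (!storeFilePath.startsWith(rootDir)) {
            throw new IllegalArgumentException("store file is not under the root dir");
        }
        // relative part, e.g. "data/default/<table>/<region>/<cf>/<hfile>"
        String relative = storeFilePath.substring(rootDir.length());
        return rootDir + "archive/" + relative;
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/";
        String src = root + "data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/7d1ca61540d54a6c9d7dfa4319e1cb71";
        // Prints the same archive target reported by the first archive entry in this section.
        System.out.println(toArchivePath(root, src));
    }
}

(End of illustrative sketch; the log resumes below.)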
2024-12-04T15:21:53,285 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1635): Region close journal for e2e19d2bb9bfcadbc1f5e0b910706700: 2024-12-04T15:21:53,287 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(170): Closed e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:53,288 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=e2e19d2bb9bfcadbc1f5e0b910706700, regionState=CLOSED 2024-12-04T15:21:53,290 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-12-04T15:21:53,290 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseRegionProcedure e2e19d2bb9bfcadbc1f5e0b910706700, server=645c2dbfef2e,42169,1733325683856 in 1.7120 sec 2024-12-04T15:21:53,292 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=33 2024-12-04T15:21:53,292 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=33, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e2e19d2bb9bfcadbc1f5e0b910706700, UNASSIGN in 1.7170 sec 2024-12-04T15:21:53,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-04T15:21:53,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.7290 sec 2024-12-04T15:21:53,296 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325713295"}]},"ts":"1733325713295"} 2024-12-04T15:21:53,297 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-04T15:21:53,299 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-04T15:21:53,301 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.7550 sec 2024-12-04T15:21:53,627 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-04T15:21:53,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-04T15:21:53,664 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-12-04T15:21:53,668 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-04T15:21:53,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:21:53,675 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:21:53,677 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): 
Deleting regions from filesystem for pid=36, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:21:53,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-04T15:21:53,680 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:53,685 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/recovered.edits] 2024-12-04T15:21:53,690 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/04dccb22ec324e2a9a657d470d6bd6b2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/04dccb22ec324e2a9a657d470d6bd6b2 2024-12-04T15:21:53,692 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/0b5d7604b12749fdb23b078aee0dcce9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/0b5d7604b12749fdb23b078aee0dcce9 2024-12-04T15:21:53,699 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/47657b68f34745648c2b284767c8a558 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/47657b68f34745648c2b284767c8a558 2024-12-04T15:21:53,701 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/e8dfbd5836524630848165eddfba05f9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/e8dfbd5836524630848165eddfba05f9 2024-12-04T15:21:53,703 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/fd433071f9844d9cbd6867b30d85235d to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/A/fd433071f9844d9cbd6867b30d85235d 2024-12-04T15:21:53,707 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/17b171a3bbed4ff6bfdcbd4043c622cc to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/17b171a3bbed4ff6bfdcbd4043c622cc 2024-12-04T15:21:53,708 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/2749a3b9eea54aa6ae88c5d58c0f176d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/2749a3b9eea54aa6ae88c5d58c0f176d 2024-12-04T15:21:53,712 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/45bde034e7dc4049b35d86ac18d3e4aa to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/45bde034e7dc4049b35d86ac18d3e4aa 2024-12-04T15:21:53,715 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/46bd14d0113a4d3ba70f041e6c4f0073 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/46bd14d0113a4d3ba70f041e6c4f0073 2024-12-04T15:21:53,717 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/86b6dbf0cdcd41aaa66c2d5e9d40667e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/B/86b6dbf0cdcd41aaa66c2d5e9d40667e 2024-12-04T15:21:53,721 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/0c03fd15dcad40b39de737599201607a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/0c03fd15dcad40b39de737599201607a 2024-12-04T15:21:53,724 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/14da791c8de44e22bdaaecc173a0250a to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/14da791c8de44e22bdaaecc173a0250a 2024-12-04T15:21:53,726 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/1863c1a97ac54793b80f1aa274f5bf61 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/1863c1a97ac54793b80f1aa274f5bf61 2024-12-04T15:21:53,728 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/60e7fc49e7f34aef85877a00761ad5fa to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/60e7fc49e7f34aef85877a00761ad5fa 2024-12-04T15:21:53,731 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/9dfcae00f8c445218bec06d35e7ce07e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/C/9dfcae00f8c445218bec06d35e7ce07e 2024-12-04T15:21:53,737 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/recovered.edits/617.seqid to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700/recovered.edits/617.seqid 2024-12-04T15:21:53,738 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e2e19d2bb9bfcadbc1f5e0b910706700 2024-12-04T15:21:53,738 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-04T15:21:53,745 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=36, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:21:53,752 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-04T15:21:53,756 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-04T15:21:53,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-04T15:21:53,801 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
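The DisableTableProcedure (pid=32) and DeleteTableProcedure (pid=36) traced through this part of the log are the server side of two Admin calls issued by the test client. A hedged sketch of that client side with the HBase 2.x API, assuming the cluster configuration is already on the classpath; this is not the test's own code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();          // picks up hbase-site.xml
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        admin.disableTable(table);  // server runs DisableTableProcedure (pid=32 above)
        admin.deleteTable(table);   // server runs DeleteTableProcedure (pid=36)
      }
    }
  }
}

Both calls block until the corresponding procedure finishes, which is why the client-side "Operation: DISABLE ... completed" and "Operation: DELETE ... completed" lines appear only after MasterRpcServices reports the procedure as done.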
2024-12-04T15:21:53,805 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=36, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:21:53,805 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-04T15:21:53,805 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733325713805"}]},"ts":"9223372036854775807"} 2024-12-04T15:21:53,817 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-04T15:21:53,817 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => e2e19d2bb9bfcadbc1f5e0b910706700, NAME => 'TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700.', STARTKEY => '', ENDKEY => ''}] 2024-12-04T15:21:53,817 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-04T15:21:53,818 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733325713817"}]},"ts":"9223372036854775807"} 2024-12-04T15:21:53,823 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-04T15:21:53,826 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=36, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:21:53,829 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 157 msec 2024-12-04T15:21:53,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-04T15:21:53,987 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-12-04T15:21:54,004 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=240 (was 219) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_87544207_22 at /127.0.0.1:58960 [Waiting for operation #249] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x656e3dc1-shared-pool-4 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_87544207_22 at /127.0.0.1:42034 [Waiting for operation #278] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x656e3dc1-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x656e3dc1-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1641455134_22 at /127.0.0.1:41874 [Waiting for operation #302] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;645c2dbfef2e:42169-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x656e3dc1-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) - Thread LEAK? -, OpenFileDescriptor=458 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=920 (was 865) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3764 (was 4692) 2024-12-04T15:21:54,016 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=240, OpenFileDescriptor=458, MaxFileDescriptor=1048576, SystemLoadAverage=920, ProcessCount=11, AvailableMemoryMB=3764 2024-12-04T15:21:54,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-04T15:21:54,019 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T15:21:54,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-04T15:21:54,022 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T15:21:54,022 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:54,022 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 37 2024-12-04T15:21:54,023 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute 
state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T15:21:54,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-04T15:21:54,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741969_1145 (size=963) 2024-12-04T15:21:54,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-04T15:21:54,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-04T15:21:54,439 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c 2024-12-04T15:21:54,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741970_1146 (size=53) 2024-12-04T15:21:54,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-04T15:21:54,853 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:21:54,853 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 3bb1bcca60c6b0bcd1824e7ad9c8f501, disabling compactions & flushes 2024-12-04T15:21:54,854 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:54,854 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
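The create request logged at 15:21:54 builds TestAcidGuarantees with three column families (A, B, C), a single version per cell, the ADAPTIVE in-memory compaction policy, and a deliberately small memstore flush size of 131072 bytes, which is what triggers the TableDescriptorChecker warning above. A sketch of an equivalent client-side createTable with the HBase 2.x descriptor builders, assuming an open Admin handle; this is illustrative, not the test's own setup code:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTableSketch {
  static void createAcidTable(Admin admin) throws java.io.IOException {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // table attribute seen in the logged descriptor: in-memory compaction policy
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
            // 131072 bytes; intentionally tiny, hence the MEMSTORE_FLUSHSIZE warning
            .setMemStoreFlushSize(128 * 1024L);
    for (String family : new String[] {"A", "B", "C"}) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
              .build());
    }
    admin.createTable(builder.build());  // server runs CreateTableProcedure (pid=37)
  }
}

The CompactingMemStore lines that follow ("Store=A, in-memory flush size threshold=2.00 MB, ... compactor=ADAPTIVE") show the region server honoring that table attribute when the new region's stores are opened.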
2024-12-04T15:21:54,854 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. after waiting 0 ms 2024-12-04T15:21:54,854 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:54,854 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:54,854 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:21:54,855 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T15:21:54,856 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733325714855"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733325714855"}]},"ts":"1733325714855"} 2024-12-04T15:21:54,857 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-04T15:21:54,858 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T15:21:54,859 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325714858"}]},"ts":"1733325714858"} 2024-12-04T15:21:54,860 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-04T15:21:54,865 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3bb1bcca60c6b0bcd1824e7ad9c8f501, ASSIGN}] 2024-12-04T15:21:54,866 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3bb1bcca60c6b0bcd1824e7ad9c8f501, ASSIGN 2024-12-04T15:21:54,867 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3bb1bcca60c6b0bcd1824e7ad9c8f501, ASSIGN; state=OFFLINE, location=645c2dbfef2e,42169,1733325683856; forceNewPlan=false, retain=false 2024-12-04T15:21:55,018 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=3bb1bcca60c6b0bcd1824e7ad9c8f501, regionState=OPENING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:55,020 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; OpenRegionProcedure 3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:21:55,129 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-04T15:21:55,173 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:55,177 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:55,177 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7285): Opening region: {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:21:55,178 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:55,178 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:21:55,178 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7327): checking encryption for 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:55,178 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7330): checking classloading for 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:55,180 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:55,182 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:21:55,182 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3bb1bcca60c6b0bcd1824e7ad9c8f501 columnFamilyName A 2024-12-04T15:21:55,182 DEBUG [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:55,188 INFO 
[StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.HStore(327): Store=3bb1bcca60c6b0bcd1824e7ad9c8f501/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:21:55,188 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:55,190 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:21:55,191 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3bb1bcca60c6b0bcd1824e7ad9c8f501 columnFamilyName B 2024-12-04T15:21:55,191 DEBUG [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:55,192 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.HStore(327): Store=3bb1bcca60c6b0bcd1824e7ad9c8f501/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:21:55,192 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:55,194 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:21:55,194 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3bb1bcca60c6b0bcd1824e7ad9c8f501 columnFamilyName C 2024-12-04T15:21:55,194 DEBUG [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:55,195 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.HStore(327): Store=3bb1bcca60c6b0bcd1824e7ad9c8f501/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:21:55,195 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:55,196 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:55,196 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:55,200 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T15:21:55,204 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1085): writing seq id for 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:55,207 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T15:21:55,207 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1102): Opened 3bb1bcca60c6b0bcd1824e7ad9c8f501; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73072860, jitterRate=0.08887046575546265}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T15:21:55,208 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1001): Region open journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:21:55,209 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., pid=39, masterSystemTime=1733325715172 2024-12-04T15:21:55,211 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
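The store-open entries above show families A, B and C each coming up with an ADAPTIVE CompactingMemStore (2.00 MB in-memory flush threshold) and the default compaction configuration. A minimal, illustrative sketch of how a client could declare such a table with the standard HBase 2.x client API follows; the table and family names are taken from the log, while the class name and everything else around them are assumptions, not the code this test actually ran.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuaranteesTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Same table attribute that shows up later under TABLE_ATTRIBUTES => METADATA.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                // Per-family equivalent of the ADAPTIVE CompactingMemStore in the store-open entries.
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                .build());
      }
      admin.createTable(table.build());
    }
  }
}
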
2024-12-04T15:21:55,211 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:55,212 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=3bb1bcca60c6b0bcd1824e7ad9c8f501, regionState=OPEN, openSeqNum=2, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:55,215 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-12-04T15:21:55,216 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; OpenRegionProcedure 3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 in 194 msec 2024-12-04T15:21:55,218 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-12-04T15:21:55,218 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3bb1bcca60c6b0bcd1824e7ad9c8f501, ASSIGN in 351 msec 2024-12-04T15:21:55,219 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T15:21:55,219 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325715219"}]},"ts":"1733325715219"} 2024-12-04T15:21:55,220 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-04T15:21:55,224 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T15:21:55,226 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2050 sec 2024-12-04T15:21:56,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-04T15:21:56,131 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 37 completed 2024-12-04T15:21:56,134 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38284410 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@43b89210 2024-12-04T15:21:56,140 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e52b2aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:56,145 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:56,148 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58486, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:56,156 DEBUG [Time-limited test {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T15:21:56,159 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38970, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T15:21:56,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-04T15:21:56,169 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T15:21:56,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-04T15:21:56,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741971_1147 (size=999) 2024-12-04T15:21:56,206 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-04T15:21:56,206 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-04T15:21:56,213 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-04T15:21:56,229 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3bb1bcca60c6b0bcd1824e7ad9c8f501, REOPEN/MOVE}] 2024-12-04T15:21:56,230 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3bb1bcca60c6b0bcd1824e7ad9c8f501, REOPEN/MOVE 2024-12-04T15:21:56,231 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=3bb1bcca60c6b0bcd1824e7ad9c8f501, regionState=CLOSING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:56,233 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-04T15:21:56,233 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE; CloseRegionProcedure 3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:21:56,384 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:56,385 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(124): Close 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:56,385 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-04T15:21:56,385 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1681): Closing 3bb1bcca60c6b0bcd1824e7ad9c8f501, disabling compactions & flushes 2024-12-04T15:21:56,385 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:56,385 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:56,386 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. after waiting 0 ms 2024-12-04T15:21:56,386 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
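The modify-table request logged by HMaster$14 changes only family A, adding IS_MOB => 'true' and MOB_THRESHOLD => '4'; the master then writes the new tableinfo and closes and reopens the region via ReopenTableRegionsProcedure (retain=true) so it picks up the new descriptor. A hedged sketch of the equivalent client-side call, assuming the standard 2.x Admin/TableDescriptorBuilder API; the class and method names here are hypothetical.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  // Rewrites family 'A' of TestAcidGuarantees with MOB enabled, mirroring the descriptor
  // diff in the HMaster$14 entry above. The close/reopen of the region is driven by the
  // master's ModifyTableProcedure, not by anything the client does here.
  static void enableMob(Admin admin) throws IOException {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(name);
    ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
    ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
        .setMobEnabled(true)   // IS_MOB => 'true'
        .setMobThreshold(4L)   // MOB_THRESHOLD => '4'
        .build();
    admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(mobA)
        .build()); // returns once the master procedure has finished
  }
}
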
2024-12-04T15:21:56,390 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-04T15:21:56,391 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:56,391 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1635): Region close journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:21:56,391 WARN [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegionServer(3786): Not adding moved region record: 3bb1bcca60c6b0bcd1824e7ad9c8f501 to self. 2024-12-04T15:21:56,393 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(170): Closed 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:56,393 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=3bb1bcca60c6b0bcd1824e7ad9c8f501, regionState=CLOSED 2024-12-04T15:21:56,399 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-12-04T15:21:56,399 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; CloseRegionProcedure 3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 in 162 msec 2024-12-04T15:21:56,400 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3bb1bcca60c6b0bcd1824e7ad9c8f501, REOPEN/MOVE; state=CLOSED, location=645c2dbfef2e,42169,1733325683856; forceNewPlan=false, retain=true 2024-12-04T15:21:56,551 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=3bb1bcca60c6b0bcd1824e7ad9c8f501, regionState=OPENING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:56,554 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=42, state=RUNNABLE; OpenRegionProcedure 3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:21:56,706 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:56,709 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:21:56,710 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:21:56,710 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:56,710 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:21:56,710 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:56,710 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:56,714 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:56,715 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:21:56,722 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3bb1bcca60c6b0bcd1824e7ad9c8f501 columnFamilyName A 2024-12-04T15:21:56,726 DEBUG [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:56,726 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.HStore(327): Store=3bb1bcca60c6b0bcd1824e7ad9c8f501/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:21:56,727 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:56,728 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:21:56,728 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3bb1bcca60c6b0bcd1824e7ad9c8f501 columnFamilyName B 2024-12-04T15:21:56,728 DEBUG [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:56,729 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.HStore(327): Store=3bb1bcca60c6b0bcd1824e7ad9c8f501/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:21:56,731 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:56,732 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:21:56,732 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3bb1bcca60c6b0bcd1824e7ad9c8f501 columnFamilyName C 2024-12-04T15:21:56,732 DEBUG [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:56,733 INFO [StoreOpener-3bb1bcca60c6b0bcd1824e7ad9c8f501-1 {}] regionserver.HStore(327): Store=3bb1bcca60c6b0bcd1824e7ad9c8f501/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:21:56,734 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:56,735 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:56,736 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:56,738 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T15:21:56,740 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:56,741 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened 3bb1bcca60c6b0bcd1824e7ad9c8f501; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65002413, jitterRate=-0.03138856589794159}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T15:21:56,742 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:21:56,743 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., pid=44, masterSystemTime=1733325716706 2024-12-04T15:21:56,745 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=3bb1bcca60c6b0bcd1824e7ad9c8f501, regionState=OPEN, openSeqNum=5, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:56,745 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:56,746 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
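The reopened region again falls back to memStoreFlushHeapSize / #families (16.0 M) because, as the FlushLargeStoresPolicy entry notes, hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. If an explicit per-family lower bound were wanted, it could be supplied as a descriptor value; a hypothetical sketch using that key (the 8 MB figure is purely illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SetPerFamilyFlushLowerBound {
  // Adds the descriptor value named in the FlushLargeStoresPolicy entry; 8 MB is illustrative.
  static void apply(Admin admin) throws IOException {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    admin.modifyTable(TableDescriptorBuilder.newBuilder(admin.getDescriptor(name))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(8L * 1024 * 1024))
        .build());
  }
}
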
2024-12-04T15:21:56,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=42 2024-12-04T15:21:56,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=42, state=SUCCESS; OpenRegionProcedure 3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 in 192 msec 2024-12-04T15:21:56,754 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-12-04T15:21:56,754 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3bb1bcca60c6b0bcd1824e7ad9c8f501, REOPEN/MOVE in 520 msec 2024-12-04T15:21:56,758 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-12-04T15:21:56,758 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 544 msec 2024-12-04T15:21:56,761 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 588 msec 2024-12-04T15:21:56,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-12-04T15:21:56,776 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5bb5c6ff to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@787a5979 2024-12-04T15:21:56,800 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fa127fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:56,802 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a3677e6 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@640dbbc9 2024-12-04T15:21:56,816 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e7d958f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:56,818 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7461d456 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e5f0ec0 2024-12-04T15:21:56,829 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4155cb5b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:56,831 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x63176ac8 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@c4c925f 2024-12-04T15:21:56,840 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ba169c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:56,841 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x01639acb to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@228a7694 2024-12-04T15:21:56,846 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1255e46c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:56,847 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x139ceb9b to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2ed70b00 2024-12-04T15:21:56,855 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ab39160, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:56,857 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04d2c596 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@124b4107 2024-12-04T15:21:56,864 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@465eabf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:56,865 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x57ef66fa to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1fe44679 2024-12-04T15:21:56,871 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34ff7728, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:56,872 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x427f113e to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7f154086 2024-12-04T15:21:56,881 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@301c2e7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:21:56,889 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:21:56,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-12-04T15:21:56,892 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:21:56,893 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:21:56,893 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:21:56,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-04T15:21:56,905 DEBUG [hconnection-0x523de341-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:56,906 DEBUG [hconnection-0x5866f590-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:56,908 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58496, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:56,909 DEBUG [hconnection-0x6104daa5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:56,911 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58498, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:56,912 DEBUG [hconnection-0x6124f4e5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:56,913 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58502, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:56,914 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58500, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:56,926 DEBUG [hconnection-0x3136dfc6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:56,927 DEBUG [hconnection-0x18dc6f16-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:56,928 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58526, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:56,933 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58528, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:56,940 DEBUG [hconnection-0x6297edbb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 
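The flush requested by the client ("flush TestAcidGuarantees") is executed as FlushTableProcedure pid=45 with a per-region FlushRegionProcedure subprocedure, while, as the following entries show, MemStoreFlusher also flushes the region directly once writes push it over its flush size. The client side of the request is a single Admin call; a minimal sketch, assuming the 2.x Admin API and leaving connection setup out:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class FlushTestTable {
  // Client side of the "flush TestAcidGuarantees" request above; on this build the master
  // turns it into FlushTableProcedure pid=45 with one FlushRegionProcedure per region.
  static void flush(Admin admin) throws IOException {
    admin.flush(TableName.valueOf("TestAcidGuarantees"));
  }
}
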
2024-12-04T15:21:56,945 DEBUG [hconnection-0x6c62da32-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:56,947 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58542, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:56,947 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58556, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:56,948 DEBUG [hconnection-0x65b7d359-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:21:56,950 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58570, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:21:56,977 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-04T15:21:56,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:56,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:21:56,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:56,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:21:56,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:56,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:21:56,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:56,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-04T15:21:57,048 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,052 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-04T15:21:57,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:57,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:21:57,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
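The entries that follow show writes being rejected with RegionTooBusyException: the region's memstore has hit its blocking limit of 512.0 K, consistent with the deliberately small MEMSTORE_FLUSHSIZE of 131072 bytes warned about earlier multiplied by a blocking multiplier of 4 (hbase.hregion.memstore.block.multiplier), since 131072 * 4 = 524288 bytes. The client treats this as a retriable exception and backs off. The settings involved are sketched below as an illustrative configuration only; the concrete values are assumptions, not what this test used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBackpressureSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Writes block once a region's memstore exceeds flushSize * blockMultiplier;
    // here 131072 * 4 gives the "Over memstore limit=512.0 K" seen in the entries below.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // RegionTooBusyException is retried by the client; these knobs bound the retries.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100);
  }
}
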
2024-12-04T15:21:57,063 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:57,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:57,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:57,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325777052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325777052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325777054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325777062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325777068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,139 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412042cc7103952be4a0d9f0a21876dd61a0f_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325716976/Put/seqid=0 2024-12-04T15:21:57,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325777174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325777177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325777177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325777178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325777180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-04T15:21:57,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741972_1148 (size=12154) 2024-12-04T15:21:57,218 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:57,230 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-04T15:21:57,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:57,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:21:57,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
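The repeated RegionTooBusyException entries above come from HRegion.checkResources(), which rejects incoming Mutate calls once the region's memstore passes its blocking limit (reported here as 512.0 K) and keeps rejecting them until MemStoreFlusher.0 finishes the in-flight flush. In a stock configuration that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the 512 K figure suggests this test run uses a much smaller flush size than the 128 MB default. A minimal sketch of how the two properties combine; the values below are illustrative assumptions, not taken from this run:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values: a 128 K flush size with the default multiplier of 4
    // would give exactly the 512 K blocking limit reported in this log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Once a region's memstore exceeds flushSize * multiplier, puts are rejected
    // with RegionTooBusyException ("Over memstore limit=...") until a flush completes.
    System.out.println("blocking limit = " + flushSize * multiplier + " bytes");
  }
}
```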
2024-12-04T15:21:57,232 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:57,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:21:57,233 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412042cc7103952be4a0d9f0a21876dd61a0f_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412042cc7103952be4a0d9f0a21876dd61a0f_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:57,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
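pid=46 is a master-dispatched flush procedure: RSProcedureDispatcher sends a FlushRegionCallable to the region server, HRegion(2496) declines because a flush is already in progress, the handler reports the IOException back through reportProcedureDone, and the master re-dispatches it until the flush started by MemStoreFlusher.0 completes. A minimal client-side sketch of the call that drives this path, assuming the standard Admin API rather than code from the test itself:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequest {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // On the server side this becomes a flush procedure dispatched to the
      // region server (the pid=46 FlushRegionCallable seen in this log).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```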
2024-12-04T15:21:57,244 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/da2549b02e1b4290b49436b9ac5bfa9c, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:21:57,259 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/da2549b02e1b4290b49436b9ac5bfa9c is 175, key is test_row_0/A:col10/1733325716976/Put/seqid=0 2024-12-04T15:21:57,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741973_1149 (size=30955) 2024-12-04T15:21:57,330 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/da2549b02e1b4290b49436b9ac5bfa9c 2024-12-04T15:21:57,334 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T15:21:57,339 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38986, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T15:21:57,386 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,405 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-04T15:21:57,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:57,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:21:57,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:57,406 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:57,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:57,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:57,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325777406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325777408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,419 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325777408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/5b7ee73d21ce40dcb29165c97f4aed1f is 50, key is test_row_0/B:col10/1733325716976/Put/seqid=0 2024-12-04T15:21:57,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325777412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325777413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741974_1150 (size=12001) 2024-12-04T15:21:57,492 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/5b7ee73d21ce40dcb29165c97f4aed1f 2024-12-04T15:21:57,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-04T15:21:57,568 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-04T15:21:57,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:57,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:21:57,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:57,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:57,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:57,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:57,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/2363d74b04054f1b8f8f4a43eb3b61fb is 50, key is test_row_0/C:col10/1733325716976/Put/seqid=0 2024-12-04T15:21:57,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741975_1151 (size=12001) 2024-12-04T15:21:57,669 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/2363d74b04054f1b8f8f4a43eb3b61fb 2024-12-04T15:21:57,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/da2549b02e1b4290b49436b9ac5bfa9c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/da2549b02e1b4290b49436b9ac5bfa9c 2024-12-04T15:21:57,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325777724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,737 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/da2549b02e1b4290b49436b9ac5bfa9c, entries=150, sequenceid=17, filesize=30.2 K 2024-12-04T15:21:57,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325777726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325777729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,740 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,741 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-04T15:21:57,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:21:57,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:21:57,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:57,741 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:57,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
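The climbing callId values on connections 58496/58498/58526/58556/58570 show the client resubmitting the same mutations after each RegionTooBusyException; the HBase client normally does this internally with exponential backoff. A hand-rolled equivalent, shown only to make the backoff visible (whether the exception surfaces directly or wrapped in a retries-exhausted error depends on client retry settings):

```java
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BusyRegionRetry {
  // Back off and retry a put rejected while the region is blocked on its
  // memstore limit; the stock client does this internally, this sketch just
  // makes the backoff explicit.
  static void putWithRetry(Connection conn, Put put) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long backoffMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs); // give MemStoreFlusher time to drain the memstore
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
      throw new Exception("region still too busy after 10 attempts");
    }
  }
}
```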
2024-12-04T15:21:57,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:57,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325777735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:57,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325777736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/5b7ee73d21ce40dcb29165c97f4aed1f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/5b7ee73d21ce40dcb29165c97f4aed1f 2024-12-04T15:21:57,813 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/5b7ee73d21ce40dcb29165c97f4aed1f, entries=150, sequenceid=17, filesize=11.7 K 2024-12-04T15:21:57,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/2363d74b04054f1b8f8f4a43eb3b61fb as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/2363d74b04054f1b8f8f4a43eb3b61fb 2024-12-04T15:21:57,861 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/2363d74b04054f1b8f8f4a43eb3b61fb, entries=150, sequenceid=17, filesize=11.7 K 2024-12-04T15:21:57,884 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 908ms, sequenceid=17, compaction requested=false 2024-12-04T15:21:57,884 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-04T15:21:57,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:21:57,908 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:57,920 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-04T15:21:57,922 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:57,923 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-04T15:21:57,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:21:57,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:57,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:21:57,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:57,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:21:57,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:57,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204d3fa2a9b83164f79bdaa7ca93cbba6a6_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325717060/Put/seqid=0 2024-12-04T15:21:58,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-04T15:21:58,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741976_1152 (size=12154) 2024-12-04T15:21:58,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:58,094 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204d3fa2a9b83164f79bdaa7ca93cbba6a6_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204d3fa2a9b83164f79bdaa7ca93cbba6a6_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:58,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 
{event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/8c8d3bc5f27a448a958483436033ad9f, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:21:58,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/8c8d3bc5f27a448a958483436033ad9f is 175, key is test_row_0/A:col10/1733325717060/Put/seqid=0 2024-12-04T15:21:58,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741977_1153 (size=30955) 2024-12-04T15:21:58,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:58,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:21:58,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:58,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325778271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:58,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:58,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325778282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:58,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:58,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325778285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:58,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:58,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325778288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:58,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:58,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325778325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:58,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:58,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325778391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:58,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:58,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325778406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:58,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:58,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325778407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:58,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:58,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325778408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:58,466 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:58,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325778450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:58,574 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/8c8d3bc5f27a448a958483436033ad9f 2024-12-04T15:21:58,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:58,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325778608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:58,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:58,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325778614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:58,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:58,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325778620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:58,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:58,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325778621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:58,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/b01575345e514d729a3b7f13cbad28c4 is 50, key is test_row_0/B:col10/1733325717060/Put/seqid=0 2024-12-04T15:21:58,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:58,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325778669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:58,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741978_1154 (size=12001) 2024-12-04T15:21:58,705 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/b01575345e514d729a3b7f13cbad28c4 2024-12-04T15:21:58,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/537f32f5951a422fa47aa5fa2687a813 is 50, key is test_row_0/C:col10/1733325717060/Put/seqid=0 2024-12-04T15:21:58,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741979_1155 (size=12001) 2024-12-04T15:21:58,798 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/537f32f5951a422fa47aa5fa2687a813 2024-12-04T15:21:58,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/8c8d3bc5f27a448a958483436033ad9f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/8c8d3bc5f27a448a958483436033ad9f 2024-12-04T15:21:58,841 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/8c8d3bc5f27a448a958483436033ad9f, entries=150, sequenceid=40, filesize=30.2 K 2024-12-04T15:21:58,842 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/b01575345e514d729a3b7f13cbad28c4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/b01575345e514d729a3b7f13cbad28c4 2024-12-04T15:21:58,849 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/b01575345e514d729a3b7f13cbad28c4, entries=150, sequenceid=40, filesize=11.7 K 2024-12-04T15:21:58,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/537f32f5951a422fa47aa5fa2687a813 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/537f32f5951a422fa47aa5fa2687a813 2024-12-04T15:21:58,868 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/537f32f5951a422fa47aa5fa2687a813, entries=150, sequenceid=40, filesize=11.7 K 2024-12-04T15:21:58,870 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 947ms, sequenceid=40, compaction requested=false 2024-12-04T15:21:58,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:21:58,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:21:58,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-12-04T15:21:58,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-12-04T15:21:58,886 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-04T15:21:58,886 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9910 sec 2024-12-04T15:21:58,891 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 1.9970 sec 2024-12-04T15:21:58,929 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-04T15:21:58,953 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-04T15:21:58,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:21:58,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:58,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:21:58,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:58,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:21:58,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:21:58,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:59,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325778990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325778987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325778992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325778994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325779005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-04T15:21:59,014 INFO [Thread-721 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-12-04T15:21:59,016 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:21:59,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-12-04T15:21:59,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-04T15:21:59,018 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:21:59,019 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:21:59,019 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:21:59,036 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204785c364835ac4a7492f5a64cec3c836a_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325718922/Put/seqid=0 2024-12-04T15:21:59,117 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325779116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,118 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325779115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,118 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325779116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-04T15:21:59,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325779116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325779116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741980_1156 (size=12154) 2024-12-04T15:21:59,136 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:21:59,172 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,173 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-04T15:21:59,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:59,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:21:59,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:59,173 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:59,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:59,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:59,180 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204785c364835ac4a7492f5a64cec3c836a_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204785c364835ac4a7492f5a64cec3c836a_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:21:59,185 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/d6ec2b06a136467dbd353f5fafbd5d29, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:21:59,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/d6ec2b06a136467dbd353f5fafbd5d29 is 175, key is test_row_0/A:col10/1733325718922/Put/seqid=0 2024-12-04T15:21:59,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741981_1157 (size=30955) 2024-12-04T15:21:59,265 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=56, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/d6ec2b06a136467dbd353f5fafbd5d29 2024-12-04T15:21:59,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325779319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-04T15:21:59,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325779321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325779323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325779324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325779324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,339 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-04T15:21:59,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:59,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:21:59,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:59,347 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:59,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:59,348 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/49a8e2bfca07463bbc4d6014f98869ea is 50, key is test_row_0/B:col10/1733325718922/Put/seqid=0 2024-12-04T15:21:59,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:59,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741982_1158 (size=12001) 2024-12-04T15:21:59,504 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,505 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-04T15:21:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:21:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:59,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:59,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:59,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-04T15:21:59,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325779626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325779629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325779630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325779631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:21:59,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325779631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,668 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-04T15:21:59,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:59,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:21:59,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:59,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:59,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:59,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:59,808 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/49a8e2bfca07463bbc4d6014f98869ea 2024-12-04T15:21:59,823 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,830 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-04T15:21:59,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:59,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:21:59,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:21:59,830 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:59,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:59,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:59,888 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/0fe3fe6d404044a5ab4b9c63c1966ab2 is 50, key is test_row_0/C:col10/1733325718922/Put/seqid=0 2024-12-04T15:21:59,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741983_1159 (size=12001) 2024-12-04T15:21:59,928 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/0fe3fe6d404044a5ab4b9c63c1966ab2 2024-12-04T15:21:59,962 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/d6ec2b06a136467dbd353f5fafbd5d29 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/d6ec2b06a136467dbd353f5fafbd5d29 2024-12-04T15:21:59,974 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/d6ec2b06a136467dbd353f5fafbd5d29, entries=150, sequenceid=56, filesize=30.2 K 2024-12-04T15:21:59,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/49a8e2bfca07463bbc4d6014f98869ea as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/49a8e2bfca07463bbc4d6014f98869ea 2024-12-04T15:21:59,983 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:21:59,984 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-04T15:21:59,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:59,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:21:59,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:21:59,986 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:59,986 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/49a8e2bfca07463bbc4d6014f98869ea, entries=150, sequenceid=56, filesize=11.7 K 2024-12-04T15:21:59,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:21:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:21:59,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/0fe3fe6d404044a5ab4b9c63c1966ab2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/0fe3fe6d404044a5ab4b9c63c1966ab2 2024-12-04T15:22:00,000 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/0fe3fe6d404044a5ab4b9c63c1966ab2, entries=150, sequenceid=56, filesize=11.7 K 2024-12-04T15:22:00,004 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 1051ms, sequenceid=56, compaction requested=true 2024-12-04T15:22:00,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:00,005 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:00,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:00,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:00,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:00,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:00,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:00,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:22:00,006 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:00,006 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/A is initiating minor compaction (all files) 2024-12-04T15:22:00,006 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/A in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:00,006 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/da2549b02e1b4290b49436b9ac5bfa9c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/8c8d3bc5f27a448a958483436033ad9f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/d6ec2b06a136467dbd353f5fafbd5d29] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=90.7 K 2024-12-04T15:22:00,006 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:00,007 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/da2549b02e1b4290b49436b9ac5bfa9c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/8c8d3bc5f27a448a958483436033ad9f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/d6ec2b06a136467dbd353f5fafbd5d29] 2024-12-04T15:22:00,007 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting da2549b02e1b4290b49436b9ac5bfa9c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733325716973 2024-12-04T15:22:00,008 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c8d3bc5f27a448a958483436033ad9f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733325717052 2024-12-04T15:22:00,008 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:00,008 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting d6ec2b06a136467dbd353f5fafbd5d29, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733325718264 2024-12-04T15:22:00,009 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:00,009 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/B is initiating minor compaction (all files) 2024-12-04T15:22:00,010 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/B in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:00,010 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/5b7ee73d21ce40dcb29165c97f4aed1f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/b01575345e514d729a3b7f13cbad28c4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/49a8e2bfca07463bbc4d6014f98869ea] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=35.2 K 2024-12-04T15:22:00,010 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b7ee73d21ce40dcb29165c97f4aed1f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733325716973 2024-12-04T15:22:00,011 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b01575345e514d729a3b7f13cbad28c4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733325717052 2024-12-04T15:22:00,011 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49a8e2bfca07463bbc4d6014f98869ea, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733325718264 2024-12-04T15:22:00,037 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#B#compaction#139 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:00,038 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/f7096d0e25824b3b9e4a79ff653f61eb is 50, key is test_row_0/B:col10/1733325718922/Put/seqid=0 2024-12-04T15:22:00,043 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:00,079 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412044bd0491c59134b7699dee103609e8312_3bb1bcca60c6b0bcd1824e7ad9c8f501 store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:00,086 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412044bd0491c59134b7699dee103609e8312_3bb1bcca60c6b0bcd1824e7ad9c8f501, store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:00,087 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412044bd0491c59134b7699dee103609e8312_3bb1bcca60c6b0bcd1824e7ad9c8f501 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:00,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-04T15:22:00,144 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-04T15:22:00,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:22:00,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:00,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:22:00,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:00,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:22:00,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:00,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:00,149 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): 
Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-04T15:22:00,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:00,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:00,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:00,150 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:00,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:00,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:00,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741985_1161 (size=4469) 2024-12-04T15:22:00,171 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#A#compaction#140 average throughput is 0.19 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:00,174 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/d71f99ca13374310a5f4f29598f6fa07 is 175, key is test_row_0/A:col10/1733325718922/Put/seqid=0 2024-12-04T15:22:00,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741984_1160 (size=12104) 2024-12-04T15:22:00,204 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/f7096d0e25824b3b9e4a79ff653f61eb as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/f7096d0e25824b3b9e4a79ff653f61eb 2024-12-04T15:22:00,206 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204fed18bfe7f36403ba413ba7a2f0a52c8_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325718993/Put/seqid=0 2024-12-04T15:22:00,211 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/B of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into f7096d0e25824b3b9e4a79ff653f61eb(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:00,211 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:00,211 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/B, priority=13, startTime=1733325720005; duration=0sec 2024-12-04T15:22:00,211 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:00,211 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:B 2024-12-04T15:22:00,212 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:00,213 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:00,213 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/C is initiating minor compaction (all files) 2024-12-04T15:22:00,213 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/C in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:00,213 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/2363d74b04054f1b8f8f4a43eb3b61fb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/537f32f5951a422fa47aa5fa2687a813, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/0fe3fe6d404044a5ab4b9c63c1966ab2] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=35.2 K 2024-12-04T15:22:00,214 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2363d74b04054f1b8f8f4a43eb3b61fb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733325716973 2024-12-04T15:22:00,214 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 537f32f5951a422fa47aa5fa2687a813, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733325717052 2024-12-04T15:22:00,215 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0fe3fe6d404044a5ab4b9c63c1966ab2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733325718264 2024-12-04T15:22:00,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): 
Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325780201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325780204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325780205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325780220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325780196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741986_1162 (size=31058) 2024-12-04T15:22:00,287 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#C#compaction#142 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:00,288 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/791a466361c741179098ad87e170ca89 is 50, key is test_row_0/C:col10/1733325718922/Put/seqid=0 2024-12-04T15:22:00,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741987_1163 (size=14594) 2024-12-04T15:22:00,292 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:00,303 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204fed18bfe7f36403ba413ba7a2f0a52c8_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204fed18bfe7f36403ba413ba7a2f0a52c8_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:00,304 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,305 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/a100cd51cc8845e08826bb0ddc244642, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:00,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/a100cd51cc8845e08826bb0ddc244642 is 175, key is test_row_0/A:col10/1733325718993/Put/seqid=0 2024-12-04T15:22:00,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-04T15:22:00,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:00,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:00,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:00,307 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:00,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:00,307 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/d71f99ca13374310a5f4f29598f6fa07 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/d71f99ca13374310a5f4f29598f6fa07 2024-12-04T15:22:00,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:00,329 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/A of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into d71f99ca13374310a5f4f29598f6fa07(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:00,329 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:00,329 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/A, priority=13, startTime=1733325720004; duration=0sec 2024-12-04T15:22:00,329 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:00,329 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:A 2024-12-04T15:22:00,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741988_1164 (size=12104) 2024-12-04T15:22:00,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741989_1165 (size=39549) 2024-12-04T15:22:00,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325780338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,346 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/791a466361c741179098ad87e170ca89 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/791a466361c741179098ad87e170ca89 2024-12-04T15:22:00,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325780338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325780339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325780341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,358 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/C of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into 791a466361c741179098ad87e170ca89(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:00,358 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:00,358 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/C, priority=13, startTime=1733325720005; duration=0sec 2024-12-04T15:22:00,358 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:00,358 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:C 2024-12-04T15:22:00,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325780356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,460 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-04T15:22:00,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:00,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:00,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:00,462 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:00,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:00,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325780555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325780557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325780557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325780557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325780561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,614 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,615 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-04T15:22:00,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:00,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:00,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:00,616 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:00,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:00,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:00,748 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/a100cd51cc8845e08826bb0ddc244642 2024-12-04T15:22:00,770 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,771 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-04T15:22:00,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:00,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:00,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:00,773 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:00,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:00,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/9eda243979c342678b114f9ffebfb392 is 50, key is test_row_0/B:col10/1733325718993/Put/seqid=0 2024-12-04T15:22:00,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:00,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741990_1166 (size=12001) 2024-12-04T15:22:00,817 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/9eda243979c342678b114f9ffebfb392 2024-12-04T15:22:00,847 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/fb79789f8ae84b62b17ffca061d8d6c7 is 50, key is test_row_0/C:col10/1733325718993/Put/seqid=0 2024-12-04T15:22:00,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325780860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325780866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325780865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325780869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:00,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325780869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741991_1167 (size=12001) 2024-12-04T15:22:00,900 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/fb79789f8ae84b62b17ffca061d8d6c7 2024-12-04T15:22:00,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/a100cd51cc8845e08826bb0ddc244642 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a100cd51cc8845e08826bb0ddc244642 2024-12-04T15:22:00,914 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a100cd51cc8845e08826bb0ddc244642, entries=200, sequenceid=77, filesize=38.6 K 2024-12-04T15:22:00,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/9eda243979c342678b114f9ffebfb392 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/9eda243979c342678b114f9ffebfb392
2024-12-04T15:22:00,923 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/9eda243979c342678b114f9ffebfb392, entries=150, sequenceid=77, filesize=11.7 K 2024-12-04T15:22:00,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/fb79789f8ae84b62b17ffca061d8d6c7 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/fb79789f8ae84b62b17ffca061d8d6c7 2024-12-04T15:22:00,927 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:00,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-04T15:22:00,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:00,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:00,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:00,929 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-04T15:22:00,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:00,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:00,935 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/fb79789f8ae84b62b17ffca061d8d6c7, entries=150, sequenceid=77, filesize=11.7 K 2024-12-04T15:22:00,937 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 793ms, sequenceid=77, compaction requested=false 2024-12-04T15:22:00,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:01,082 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:01,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-04T15:22:01,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:01,083 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-04T15:22:01,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:22:01,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:01,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:22:01,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:01,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:22:01,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:01,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-04T15:22:01,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412049dd8550f70604f638a688445a431b544_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325720216/Put/seqid=0 2024-12-04T15:22:01,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741992_1168 (size=12154) 2024-12-04T15:22:01,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:01,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:01,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:01,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325781425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:01,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:01,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325781430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:01,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:01,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325781431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:01,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:01,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325781440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:01,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:01,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325781441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:01,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:01,548 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:01,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325781542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:01,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:01,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325781544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:01,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:01,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325781551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:01,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325781542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:01,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:01,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325781554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:01,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:01,597 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412049dd8550f70604f638a688445a431b544_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412049dd8550f70604f638a688445a431b544_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:01,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/a63b2954dcd347629b2853517bee957b, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:01,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/a63b2954dcd347629b2853517bee957b is 175, key is test_row_0/A:col10/1733325720216/Put/seqid=0 2024-12-04T15:22:01,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741993_1169 (size=30955) 2024-12-04T15:22:01,645 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/a63b2954dcd347629b2853517bee957b 2024-12-04T15:22:01,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/09d3adada5204d5291610a193c44ddf4 is 50, key is test_row_0/B:col10/1733325720216/Put/seqid=0 2024-12-04T15:22:01,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741994_1170 (size=12001) 2024-12-04T15:22:01,718 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/09d3adada5204d5291610a193c44ddf4 2024-12-04T15:22:01,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:01,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325781780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:01,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:01,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325781780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:01,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/5b793bac534547deb9f7b4b6400223f1 is 50, key is test_row_0/C:col10/1733325720216/Put/seqid=0 2024-12-04T15:22:01,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:01,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325781781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:01,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:01,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325781792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:01,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:01,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325781793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:01,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741995_1171 (size=12001) 2024-12-04T15:22:02,103 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325782100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:02,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325782101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:02,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325782102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:02,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325782116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:02,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325782128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:02,230 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/5b793bac534547deb9f7b4b6400223f1 2024-12-04T15:22:02,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/a63b2954dcd347629b2853517bee957b as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a63b2954dcd347629b2853517bee957b 2024-12-04T15:22:02,244 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a63b2954dcd347629b2853517bee957b, entries=150, sequenceid=95, filesize=30.2 K 2024-12-04T15:22:02,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/09d3adada5204d5291610a193c44ddf4 as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/09d3adada5204d5291610a193c44ddf4 2024-12-04T15:22:02,251 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/09d3adada5204d5291610a193c44ddf4, entries=150, sequenceid=95, filesize=11.7 K 2024-12-04T15:22:02,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/5b793bac534547deb9f7b4b6400223f1 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/5b793bac534547deb9f7b4b6400223f1 2024-12-04T15:22:02,257 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/5b793bac534547deb9f7b4b6400223f1, entries=150, sequenceid=95, filesize=11.7 K 2024-12-04T15:22:02,259 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 1176ms, sequenceid=95, compaction requested=true 2024-12-04T15:22:02,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:02,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
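[editor's note] The repeated RegionTooBusyException warnings in this stretch come from HRegion.checkResources rejecting writes while the region's memstore is over its blocking limit (512.0 K here); that is the intended backpressure while the pid=48 flush drains the memstore, and the client writers retry, which is why the same connections keep reappearing with higher callIds. The sketch below shows the two configuration keys whose product determines that blocking limit; the specific byte values are assumptions chosen only to reproduce a 512 K limit like the one in this log, not values read from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBackpressureSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Writes are rejected with RegionTooBusyException once the per-region
    // memstore exceeds the flush size multiplied by the block multiplier.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed 128 K flush size
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // 128 K * 4 = 512 K blocking limit
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes"); // 524288 = 512 K
  }
}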
2024-12-04T15:22:02,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-04T15:22:02,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-04T15:22:02,264 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-12-04T15:22:02,264 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.2410 sec 2024-12-04T15:22:02,268 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 3.2490 sec 2024-12-04T15:22:02,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:02,616 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-04T15:22:02,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:22:02,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:02,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:22:02,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:02,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:22:02,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:02,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325782648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:02,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325782652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:02,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325782652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:02,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412046d18090e3ed94c26b22038a46b690526_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325721436/Put/seqid=0 2024-12-04T15:22:02,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325782655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:02,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325782664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:02,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741996_1172 (size=17034) 2024-12-04T15:22:02,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325782760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:02,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325782769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:02,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325782771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:02,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325782773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:02,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325782972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:02,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325782979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:02,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325782984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:02,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:02,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325782992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:03,148 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:03,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-04T15:22:03,150 INFO [Thread-721 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-12-04T15:22:03,151 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:22:03,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-04T15:22:03,153 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:22:03,154 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:22:03,154 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:22:03,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-04T15:22:03,166 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412046d18090e3ed94c26b22038a46b690526_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412046d18090e3ed94c26b22038a46b690526_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:03,168 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/45ed29495496495c83c6fd2c12a7314e, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:03,169 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/45ed29495496495c83c6fd2c12a7314e is 175, key is test_row_0/A:col10/1733325721436/Put/seqid=0 2024-12-04T15:22:03,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741997_1173 (size=48139) 2024-12-04T15:22:03,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-04T15:22:03,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:03,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325783279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:03,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:03,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325783289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:03,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:03,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325783297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:03,303 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:03,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325783300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:03,306 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:03,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-04T15:22:03,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:03,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:03,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:03,307 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:03,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:03,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:03,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-04T15:22:03,463 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:03,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-04T15:22:03,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:03,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:03,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:03,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:03,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:03,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:03,621 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:03,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-04T15:22:03,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:03,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:03,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:03,623 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:03,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:03,624 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/45ed29495496495c83c6fd2c12a7314e 2024-12-04T15:22:03,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:03,632 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-04T15:22:03,632 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-04T15:22:03,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/1f33e6b6517248d98b98e5b50a79c055 is 50, key is test_row_0/B:col10/1733325721436/Put/seqid=0 2024-12-04T15:22:03,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741998_1174 (size=12001) 2024-12-04T15:22:03,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/1f33e6b6517248d98b98e5b50a79c055 2024-12-04T15:22:03,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:03,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325783706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:03,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-04T15:22:03,767 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/943890f65d574cbb871e387cc81bedb6 is 50, key is test_row_0/C:col10/1733325721436/Put/seqid=0 2024-12-04T15:22:03,777 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:03,780 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-04T15:22:03,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:03,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:03,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:03,782 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:03,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:03,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:03,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:03,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325783792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:03,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741999_1175 (size=12001) 2024-12-04T15:22:03,805 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/943890f65d574cbb871e387cc81bedb6 2024-12-04T15:22:03,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:03,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325783800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:03,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:03,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325783803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:03,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/45ed29495496495c83c6fd2c12a7314e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/45ed29495496495c83c6fd2c12a7314e 2024-12-04T15:22:03,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:03,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325783808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:03,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/45ed29495496495c83c6fd2c12a7314e, entries=250, sequenceid=119, filesize=47.0 K 2024-12-04T15:22:03,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/1f33e6b6517248d98b98e5b50a79c055 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1f33e6b6517248d98b98e5b50a79c055 2024-12-04T15:22:03,865 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1f33e6b6517248d98b98e5b50a79c055, entries=150, sequenceid=119, filesize=11.7 K 2024-12-04T15:22:03,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/943890f65d574cbb871e387cc81bedb6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/943890f65d574cbb871e387cc81bedb6 2024-12-04T15:22:03,873 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/943890f65d574cbb871e387cc81bedb6, entries=150, sequenceid=119, filesize=11.7 K 2024-12-04T15:22:03,874 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 1258ms, sequenceid=119, compaction requested=true 2024-12-04T15:22:03,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:03,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:03,875 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:03,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:03,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:03,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:03,875 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:22:03,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:22:03,875 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:22:03,888 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 149701 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:22:03,889 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/A is initiating minor compaction (all files) 2024-12-04T15:22:03,889 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/A in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:03,889 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/d71f99ca13374310a5f4f29598f6fa07, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a100cd51cc8845e08826bb0ddc244642, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a63b2954dcd347629b2853517bee957b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/45ed29495496495c83c6fd2c12a7314e] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=146.2 K 2024-12-04T15:22:03,889 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:03,889 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/d71f99ca13374310a5f4f29598f6fa07, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a100cd51cc8845e08826bb0ddc244642, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a63b2954dcd347629b2853517bee957b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/45ed29495496495c83c6fd2c12a7314e] 2024-12-04T15:22:03,889 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:22:03,890 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/B is initiating minor compaction (all files) 2024-12-04T15:22:03,890 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/B in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:03,890 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/f7096d0e25824b3b9e4a79ff653f61eb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/9eda243979c342678b114f9ffebfb392, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/09d3adada5204d5291610a193c44ddf4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1f33e6b6517248d98b98e5b50a79c055] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=47.0 K 2024-12-04T15:22:03,891 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting f7096d0e25824b3b9e4a79ff653f61eb, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733325718264 2024-12-04T15:22:03,892 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting d71f99ca13374310a5f4f29598f6fa07, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733325718264 2024-12-04T15:22:03,892 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 9eda243979c342678b114f9ffebfb392, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733325718992 2024-12-04T15:22:03,893 DEBUG 
[RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a100cd51cc8845e08826bb0ddc244642, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733325718981 2024-12-04T15:22:03,896 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 09d3adada5204d5291610a193c44ddf4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733325720175 2024-12-04T15:22:03,896 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a63b2954dcd347629b2853517bee957b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733325720175 2024-12-04T15:22:03,897 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f33e6b6517248d98b98e5b50a79c055, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733325721425 2024-12-04T15:22:03,898 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45ed29495496495c83c6fd2c12a7314e, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733325721425 2024-12-04T15:22:03,922 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#B#compaction#151 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:03,923 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/0318a86d46c0457d986d9228dcab7a00 is 50, key is test_row_0/B:col10/1733325721436/Put/seqid=0 2024-12-04T15:22:03,935 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:03,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-04T15:22:03,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:03,936 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-12-04T15:22:03,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A
2024-12-04T15:22:03,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-04T15:22:03,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B
2024-12-04T15:22:03,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-04T15:22:03,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C
2024-12-04T15:22:03,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-04T15:22:03,938 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501]
2024-12-04T15:22:03,944 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120447f75cbe4b164f2aa38e2652a170d159_3bb1bcca60c6b0bcd1824e7ad9c8f501 store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501]
2024-12-04T15:22:03,948 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120447f75cbe4b164f2aa38e2652a170d159_3bb1bcca60c6b0bcd1824e7ad9c8f501, store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501]
2024-12-04T15:22:03,952 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120447f75cbe4b164f2aa38e2652a170d159_3bb1bcca60c6b0bcd1824e7ad9c8f501 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501]
2024-12-04T15:22:03,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742000_1176 (size=12241)
2024-12-04T15:22:03,994 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/0318a86d46c0457d986d9228dcab7a00 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/0318a86d46c0457d986d9228dcab7a00
2024-12-04T15:22:04,002 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/B of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into 0318a86d46c0457d986d9228dcab7a00(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-04T15:22:04,002 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501:
2024-12-04T15:22:04,003 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/B, priority=12, startTime=1733325723875; duration=0sec
2024-12-04T15:22:04,003 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-04T15:22:04,003 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:B
2024-12-04T15:22:04,003 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-12-04T15:22:04,009 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-12-04T15:22:04,009 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/C is initiating minor compaction (all files)
2024-12-04T15:22:04,009 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/C in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.
2024-12-04T15:22:04,009 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/791a466361c741179098ad87e170ca89, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/fb79789f8ae84b62b17ffca061d8d6c7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/5b793bac534547deb9f7b4b6400223f1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/943890f65d574cbb871e387cc81bedb6] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=47.0 K 2024-12-04T15:22:04,010 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 791a466361c741179098ad87e170ca89, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733325718264 2024-12-04T15:22:04,011 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting fb79789f8ae84b62b17ffca061d8d6c7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733325718992 2024-12-04T15:22:04,012 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b793bac534547deb9f7b4b6400223f1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733325720175 2024-12-04T15:22:04,012 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 943890f65d574cbb871e387cc81bedb6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733325721425 2024-12-04T15:22:04,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204e7f03d373a524bd0acce528c5b917908_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325722645/Put/seqid=0 2024-12-04T15:22:04,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742001_1177 (size=4469) 2024-12-04T15:22:04,052 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#C#compaction#154 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:04,053 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/552d56bb8d3f41c0b143cbeb98a8e8e3 is 50, key is test_row_0/C:col10/1733325721436/Put/seqid=0 2024-12-04T15:22:04,055 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#A#compaction#152 average throughput is 0.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:04,056 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/f79295b59a794efcba1362ba9a2c269e is 175, key is test_row_0/A:col10/1733325721436/Put/seqid=0 2024-12-04T15:22:04,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742002_1178 (size=12254) 2024-12-04T15:22:04,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742004_1180 (size=31195) 2024-12-04T15:22:04,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742003_1179 (size=12241) 2024-12-04T15:22:04,147 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/f79295b59a794efcba1362ba9a2c269e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f79295b59a794efcba1362ba9a2c269e 2024-12-04T15:22:04,154 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/552d56bb8d3f41c0b143cbeb98a8e8e3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/552d56bb8d3f41c0b143cbeb98a8e8e3 2024-12-04T15:22:04,159 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/A of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into f79295b59a794efcba1362ba9a2c269e(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:04,159 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501:
2024-12-04T15:22:04,159 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/A, priority=12, startTime=1733325723874; duration=0sec
2024-12-04T15:22:04,159 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-04T15:22:04,159 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:A
2024-12-04T15:22:04,165 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/C of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into 552d56bb8d3f41c0b143cbeb98a8e8e3(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-04T15:22:04,165 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501:
2024-12-04T15:22:04,165 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/C, priority=12, startTime=1733325723875; duration=0sec
2024-12-04T15:22:04,165 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-04T15:22:04,165 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:C
2024-12-04T15:22:04,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-12-04T15:22:04,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-04T15:22:04,537 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204e7f03d373a524bd0acce528c5b917908_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204e7f03d373a524bd0acce528c5b917908_3bb1bcca60c6b0bcd1824e7ad9c8f501
2024-12-04T15:22:04,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/b8cc81042f8d49239915cc2ed1ab6e65, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501]
2024-12-04T15:22:04,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/b8cc81042f8d49239915cc2ed1ab6e65 is 175, key is test_row_0/A:col10/1733325722645/Put/seqid=0
2024-12-04T15:22:04,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742005_1181 (size=31055)
2024-12-04T15:22:04,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing
2024-12-04T15:22:04,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501
2024-12-04T15:22:04,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-04T15:22:04,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325784867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856
2024-12-04T15:22:04,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:04,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325784874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:04,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:04,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325784877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:04,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:04,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325784877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:04,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:04,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325784984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:04,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:04,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325784986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:04,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:05,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325784993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:05,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:05,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325784996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:05,013 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=131, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/b8cc81042f8d49239915cc2ed1ab6e65 2024-12-04T15:22:05,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/4136d4e1100647b988ac6a15815c8aca is 50, key is test_row_0/B:col10/1733325722645/Put/seqid=0 2024-12-04T15:22:05,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742006_1182 (size=12101) 2024-12-04T15:22:05,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:05,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325785195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:05,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:05,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325785195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:05,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:05,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325785203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:05,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:05,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325785225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:05,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-04T15:22:05,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:05,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325785498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:05,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:05,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325785506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:05,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:05,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325785508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:05,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-04T15:22:05,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325785540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856
2024-12-04T15:22:05,570 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/4136d4e1100647b988ac6a15815c8aca
2024-12-04T15:22:05,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/37533b799c4e43de83853bf3ea108dc5 is 50, key is test_row_0/C:col10/1733325722645/Put/seqid=0
2024-12-04T15:22:05,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742007_1183 (size=12101)
2024-12-04T15:22:05,621 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/37533b799c4e43de83853bf3ea108dc5
2024-12-04T15:22:05,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/b8cc81042f8d49239915cc2ed1ab6e65 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/b8cc81042f8d49239915cc2ed1ab6e65
2024-12-04T15:22:05,653 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/b8cc81042f8d49239915cc2ed1ab6e65, entries=150, sequenceid=131, filesize=30.3 K
2024-12-04T15:22:05,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/4136d4e1100647b988ac6a15815c8aca as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/4136d4e1100647b988ac6a15815c8aca
2024-12-04T15:22:05,666 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/4136d4e1100647b988ac6a15815c8aca, entries=150, sequenceid=131, filesize=11.8 K
2024-12-04T15:22:05,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/37533b799c4e43de83853bf3ea108dc5 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/37533b799c4e43de83853bf3ea108dc5
2024-12-04T15:22:05,675 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/37533b799c4e43de83853bf3ea108dc5, entries=150, sequenceid=131, filesize=11.8 K
2024-12-04T15:22:05,677 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 1741ms, sequenceid=131, compaction requested=false
2024-12-04T15:22:05,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501:
2024-12-04T15:22:05,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.
2024-12-04T15:22:05,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50
2024-12-04T15:22:05,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=50
2024-12-04T15:22:05,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49
2024-12-04T15:22:05,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5280 sec
2024-12-04T15:22:05,686 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.5340 sec
2024-12-04T15:22:05,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501
2024-12-04T15:22:05,748 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB
2024-12-04T15:22:05,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A
2024-12-04T15:22:05,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-04T15:22:05,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B
2024-12-04T15:22:05,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-04T15:22:05,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C
2024-12-04T15:22:05,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-04T15:22:05,795 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204c25c7efee61f43d4a550019484dd5e68_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325724865/Put/seqid=0
2024-12-04T15:22:05,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:05,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325785828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:05,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742008_1184 (size=12304) 2024-12-04T15:22:05,847 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:05,856 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204c25c7efee61f43d4a550019484dd5e68_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204c25c7efee61f43d4a550019484dd5e68_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:05,857 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/3047a5cafd3a467892a7d44f78c5dff2, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:05,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/3047a5cafd3a467892a7d44f78c5dff2 is 175, key is test_row_0/A:col10/1733325724865/Put/seqid=0 2024-12-04T15:22:05,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742009_1185 (size=31105) 2024-12-04T15:22:05,913 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=160, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/3047a5cafd3a467892a7d44f78c5dff2 2024-12-04T15:22:05,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:05,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325785944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:05,957 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/1d253cfd407342b8a289dda98a58141f is 50, key is test_row_0/B:col10/1733325724865/Put/seqid=0 2024-12-04T15:22:06,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742010_1186 (size=12151) 2024-12-04T15:22:06,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:06,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325786013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:06,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:06,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325786013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:06,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:06,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325786019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:06,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:06,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325786048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:06,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:06,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325786152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:06,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/1d253cfd407342b8a289dda98a58141f 2024-12-04T15:22:06,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/263c59f70cbc473cb811aff78c06ca47 is 50, key is test_row_0/C:col10/1733325724865/Put/seqid=0 2024-12-04T15:22:06,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742011_1187 (size=12151) 2024-12-04T15:22:06,490 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/263c59f70cbc473cb811aff78c06ca47 2024-12-04T15:22:06,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:06,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325786485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:06,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/3047a5cafd3a467892a7d44f78c5dff2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/3047a5cafd3a467892a7d44f78c5dff2 2024-12-04T15:22:06,502 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/3047a5cafd3a467892a7d44f78c5dff2, entries=150, sequenceid=160, filesize=30.4 K 2024-12-04T15:22:06,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/1d253cfd407342b8a289dda98a58141f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1d253cfd407342b8a289dda98a58141f 2024-12-04T15:22:06,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1d253cfd407342b8a289dda98a58141f, entries=150, sequenceid=160, filesize=11.9 K 2024-12-04T15:22:06,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/263c59f70cbc473cb811aff78c06ca47 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/263c59f70cbc473cb811aff78c06ca47 2024-12-04T15:22:06,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-04T15:22:06,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/263c59f70cbc473cb811aff78c06ca47, entries=150, sequenceid=160, filesize=11.9 K
2024-12-04T15:22:06,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 770ms, sequenceid=160, compaction requested=true
2024-12-04T15:22:06,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501:
2024-12-04T15:22:06,519 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-04T15:22:06,520 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93355 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-04T15:22:06,520 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/A is initiating minor compaction (all files)
2024-12-04T15:22:06,521 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/A in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.
2024-12-04T15:22:06,521 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f79295b59a794efcba1362ba9a2c269e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/b8cc81042f8d49239915cc2ed1ab6e65, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/3047a5cafd3a467892a7d44f78c5dff2] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=91.2 K
2024-12-04T15:22:06,521 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.
2024-12-04T15:22:06,521 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f79295b59a794efcba1362ba9a2c269e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/b8cc81042f8d49239915cc2ed1ab6e65, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/3047a5cafd3a467892a7d44f78c5dff2]
2024-12-04T15:22:06,521 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting f79295b59a794efcba1362ba9a2c269e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733325721425
2024-12-04T15:22:06,522 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting b8cc81042f8d49239915cc2ed1ab6e65, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733325722645
2024-12-04T15:22:06,522 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3047a5cafd3a467892a7d44f78c5dff2, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733325724858
2024-12-04T15:22:06,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-04T15:22:06,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:A, priority=-2147483648, current under compaction store size is 1
2024-12-04T15:22:06,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-04T15:22:06,523 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-04T15:22:06,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:B, priority=-2147483648, current under compaction store size is 2
2024-12-04T15:22:06,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-04T15:22:06,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:C, priority=-2147483648, current under compaction store size is 3
2024-12-04T15:22:06,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-04T15:22:06,526 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-04T15:22:06,527 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/B is initiating minor compaction (all files)
2024-12-04T15:22:06,527 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/B in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.
2024-12-04T15:22:06,527 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/0318a86d46c0457d986d9228dcab7a00, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/4136d4e1100647b988ac6a15815c8aca, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1d253cfd407342b8a289dda98a58141f] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=35.6 K
2024-12-04T15:22:06,527 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0318a86d46c0457d986d9228dcab7a00, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733325721425
2024-12-04T15:22:06,528 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4136d4e1100647b988ac6a15815c8aca, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733325722645
2024-12-04T15:22:06,529 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d253cfd407342b8a289dda98a58141f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733325724858
2024-12-04T15:22:06,536 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501]
2024-12-04T15:22:06,557 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241204b717349bbc584dceafe65ac3eb719c42_3bb1bcca60c6b0bcd1824e7ad9c8f501 store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501]
2024-12-04T15:22:06,559 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241204b717349bbc584dceafe65ac3eb719c42_3bb1bcca60c6b0bcd1824e7ad9c8f501, store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501]
2024-12-04T15:22:06,559 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204b717349bbc584dceafe65ac3eb719c42_3bb1bcca60c6b0bcd1824e7ad9c8f501 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501]
2024-12-04T15:22:06,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-04T15:22:06,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-04T15:22:06,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-04T15:22:06,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-04T15:22:06,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-04T15:22:06,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-04T15:22:06,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-04T15:22:06,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-04T15:22:06,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-04T15:22:06,578 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#B#compaction#161 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms.
1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:06,579 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/a8580831c3a94dc9a641b8e9b4d46ddb is 50, key is test_row_0/B:col10/1733325724865/Put/seqid=0 2024-12-04T15:22:06,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,606 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742012_1188 (size=4469) 2024-12-04T15:22:06,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,617 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#A#compaction#160 average throughput is 0.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:06,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,618 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/a31ddccb1cd040abaf3061820278012f is 175, key is test_row_0/A:col10/1733325724865/Put/seqid=0 2024-12-04T15:22:06,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,632 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742013_1189 (size=12493) 2024-12-04T15:22:06,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742014_1190 (size=31447) 2024-12-04T15:22:06,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:06,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,007 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-04T15:22:07,007 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:22:07,007 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:07,007 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:22:07,007 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:07,007 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:22:07,007 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:07,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:07,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204302f5a7d2d764c0ab316fb9459aa45ac_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325725823/Put/seqid=0 2024-12-04T15:22:07,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,056 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,068 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/a8580831c3a94dc9a641b8e9b4d46ddb as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/a8580831c3a94dc9a641b8e9b4d46ddb 2024-12-04T15:22:07,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742015_1191 (size=12304) 2024-12-04T15:22:07,096 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/B of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into a8580831c3a94dc9a641b8e9b4d46ddb(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:07,096 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:07,096 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/B, priority=13, startTime=1733325726523; duration=0sec 2024-12-04T15:22:07,096 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:07,096 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:B 2024-12-04T15:22:07,096 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:07,100 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/a31ddccb1cd040abaf3061820278012f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a31ddccb1cd040abaf3061820278012f 2024-12-04T15:22:07,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,108 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:07,108 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/C is initiating minor compaction (all files) 2024-12-04T15:22:07,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,108 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/C in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:07,108 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/552d56bb8d3f41c0b143cbeb98a8e8e3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/37533b799c4e43de83853bf3ea108dc5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/263c59f70cbc473cb811aff78c06ca47] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=35.6 K 2024-12-04T15:22:07,109 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 552d56bb8d3f41c0b143cbeb98a8e8e3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733325721425 2024-12-04T15:22:07,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,110 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37533b799c4e43de83853bf3ea108dc5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733325722645 2024-12-04T15:22:07,111 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 263c59f70cbc473cb811aff78c06ca47, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733325724858 2024-12-04T15:22:07,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,114 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/A of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into a31ddccb1cd040abaf3061820278012f(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:07,114 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:07,115 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/A, priority=13, startTime=1733325726518; duration=0sec 2024-12-04T15:22:07,115 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:07,115 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:A 2024-12-04T15:22:07,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,128 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#C#compaction#163 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:07,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,131 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/77088d9e748843c18859bed8c017819a is 50, key is test_row_0/C:col10/1733325724865/Put/seqid=0 2024-12-04T15:22:07,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325787140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325787153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325787153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325787153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742016_1192 (size=12493) 2024-12-04T15:22:07,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325787148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,180 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/77088d9e748843c18859bed8c017819a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/77088d9e748843c18859bed8c017819a 2024-12-04T15:22:07,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,186 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/C of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into 77088d9e748843c18859bed8c017819a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:07,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,186 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:07,186 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/C, priority=13, startTime=1733325726524; duration=0sec 2024-12-04T15:22:07,186 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:07,186 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:C 2024-12-04T15:22:07,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325787271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325787273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325787273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325787274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-04T15:22:07,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325787276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856
2024-12-04T15:22:07,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-12-04T15:22:07,292 INFO [Thread-721 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed
2024-12-04T15:22:07,305 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-04T15:22:07,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees
2024-12-04T15:22:07,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51
2024-12-04T15:22:07,309 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-04T15:22:07,310 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-04T15:22:07,310 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-04T15:22:07,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51
2024-12-04T15:22:07,464 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856
2024-12-04T15:22:07,465 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52
2024-12-04T15:22:07,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.
2024-12-04T15:22:07,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing
2024-12-04T15:22:07,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.
2024-12-04T15:22:07,466 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52
java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-04T15:22:07,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52
java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-04T15:22:07,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=52
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-04T15:22:07,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325787479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325787480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325787481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325787482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325787492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,497 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:07,508 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204302f5a7d2d764c0ab316fb9459aa45ac_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204302f5a7d2d764c0ab316fb9459aa45ac_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:07,514 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/48cbcb656e39452faef886ea907b68b1, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:07,515 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/48cbcb656e39452faef886ea907b68b1 is 175, key is test_row_0/A:col10/1733325725823/Put/seqid=0 2024-12-04T15:22:07,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742017_1193 (size=31101) 2024-12-04T15:22:07,573 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=171, 
memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/48cbcb656e39452faef886ea907b68b1 2024-12-04T15:22:07,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/1a3a576c4cdd48d780b279d34a8cd222 is 50, key is test_row_0/B:col10/1733325725823/Put/seqid=0 2024-12-04T15:22:07,618 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,619 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-04T15:22:07,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:07,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:07,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:07,620 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:07,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:07,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:07,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-04T15:22:07,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742018_1194 (size=9757) 2024-12-04T15:22:07,649 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/1a3a576c4cdd48d780b279d34a8cd222 2024-12-04T15:22:07,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/9cdba9f2258a47e78524694ddb125b7f is 50, key is test_row_0/C:col10/1733325725823/Put/seqid=0 2024-12-04T15:22:07,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742019_1195 (size=9757) 2024-12-04T15:22:07,784 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-04T15:22:07,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:07,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:07,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:07,788 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:07,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325787784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:07,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:07,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325787792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325787793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325787796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:07,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325787804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-04T15:22:07,944 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:07,945 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-04T15:22:07,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:07,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:07,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:07,945 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:07,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:07,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:08,105 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-04T15:22:08,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:08,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:08,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:08,106 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:08,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:08,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:08,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/9cdba9f2258a47e78524694ddb125b7f 2024-12-04T15:22:08,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/48cbcb656e39452faef886ea907b68b1 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/48cbcb656e39452faef886ea907b68b1 2024-12-04T15:22:08,269 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/48cbcb656e39452faef886ea907b68b1, entries=150, sequenceid=171, filesize=30.4 K 2024-12-04T15:22:08,270 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,270 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-04T15:22:08,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:08,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:08,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:08,271 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:08,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:08,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/1a3a576c4cdd48d780b279d34a8cd222 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1a3a576c4cdd48d780b279d34a8cd222 2024-12-04T15:22:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:08,284 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1a3a576c4cdd48d780b279d34a8cd222, entries=100, sequenceid=171, filesize=9.5 K 2024-12-04T15:22:08,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/9cdba9f2258a47e78524694ddb125b7f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/9cdba9f2258a47e78524694ddb125b7f 2024-12-04T15:22:08,291 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/9cdba9f2258a47e78524694ddb125b7f, entries=100, sequenceid=171, filesize=9.5 K 2024-12-04T15:22:08,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 1293ms, sequenceid=171, compaction requested=false 2024-12-04T15:22:08,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:08,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:08,304 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-04T15:22:08,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:22:08,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:08,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:22:08,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:08,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:22:08,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:08,333 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325788324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325788328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325788329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325788331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325788333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,346 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412044a5f54058e8e420187bc07b47021107c_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325727150/Put/seqid=0 2024-12-04T15:22:08,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742020_1196 (size=12304) 2024-12-04T15:22:08,429 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-04T15:22:08,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:08,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:08,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:08,432 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:08,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:08,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:08,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-04T15:22:08,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325788439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325788442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325788442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325788444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325788448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,586 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,586 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-04T15:22:08,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:08,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:08,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:08,587 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:08,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:08,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:08,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325788647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325788648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325788652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325788652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325788665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,743 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,744 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-04T15:22:08,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:08,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:08,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:08,745 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:08,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:08,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:08,801 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:08,840 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412044a5f54058e8e420187bc07b47021107c_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412044a5f54058e8e420187bc07b47021107c_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:08,852 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/0a2e507357f14f9d81d8ac49719a626f, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:08,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/0a2e507357f14f9d81d8ac49719a626f is 175, key is test_row_0/A:col10/1733325727150/Put/seqid=0 2024-12-04T15:22:08,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742021_1197 (size=31105) 2024-12-04T15:22:08,900 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,904 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-04T15:22:08,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:08,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:08,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:08,905 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:08,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:08,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:08,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325788952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325788959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325788959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325788959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:08,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:08,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325788968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:09,063 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:09,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-04T15:22:09,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:09,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:09,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:09,064 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:09,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:09,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:09,216 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:09,217 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-04T15:22:09,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:09,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:09,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:09,217 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:09,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:09,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:09,295 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=200, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/0a2e507357f14f9d81d8ac49719a626f 2024-12-04T15:22:09,326 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/8dfac8a07b574261ac5f8947f16b976d is 50, key is test_row_0/B:col10/1733325727150/Put/seqid=0 2024-12-04T15:22:09,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742022_1198 (size=12151) 2024-12-04T15:22:09,370 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:09,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-04T15:22:09,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:09,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
as already flushing 2024-12-04T15:22:09,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:09,371 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:09,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:09,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:09,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-04T15:22:09,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:09,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325789462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:09,473 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:09,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325789469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:09,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:09,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325789469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:09,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:09,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325789482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:09,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:09,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325789482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:09,529 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:09,532 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-04T15:22:09,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:09,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:09,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:09,533 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:09,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:09,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:09,645 INFO [master/645c2dbfef2e:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-04T15:22:09,645 INFO [master/645c2dbfef2e:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-04T15:22:09,734 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:09,736 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-04T15:22:09,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:09,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:09,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:09,737 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:09,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:09,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:09,776 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/8dfac8a07b574261ac5f8947f16b976d 2024-12-04T15:22:09,826 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/11365962712042138458fe09e1cf12ee is 50, key is test_row_0/C:col10/1733325727150/Put/seqid=0 2024-12-04T15:22:09,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742023_1199 (size=12151) 2024-12-04T15:22:09,847 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/11365962712042138458fe09e1cf12ee 2024-12-04T15:22:09,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/0a2e507357f14f9d81d8ac49719a626f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/0a2e507357f14f9d81d8ac49719a626f 2024-12-04T15:22:09,863 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/0a2e507357f14f9d81d8ac49719a626f, entries=150, sequenceid=200, filesize=30.4 K 2024-12-04T15:22:09,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/8dfac8a07b574261ac5f8947f16b976d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/8dfac8a07b574261ac5f8947f16b976d 2024-12-04T15:22:09,869 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/8dfac8a07b574261ac5f8947f16b976d, entries=150, sequenceid=200, filesize=11.9 K 2024-12-04T15:22:09,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/11365962712042138458fe09e1cf12ee as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/11365962712042138458fe09e1cf12ee 2024-12-04T15:22:09,875 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/11365962712042138458fe09e1cf12ee, entries=150, sequenceid=200, filesize=11.9 K 2024-12-04T15:22:09,876 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 1572ms, sequenceid=200, compaction requested=true 2024-12-04T15:22:09,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:09,881 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:09,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:09,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:09,881 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:09,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:09,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:09,882 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:09,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:09,882 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:09,882 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/A is initiating minor compaction (all files) 2024-12-04T15:22:09,882 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:09,882 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/A in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:09,882 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/B is initiating minor compaction (all files) 2024-12-04T15:22:09,883 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/B in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:09,883 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a31ddccb1cd040abaf3061820278012f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/48cbcb656e39452faef886ea907b68b1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/0a2e507357f14f9d81d8ac49719a626f] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=91.5 K 2024-12-04T15:22:09,883 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/a8580831c3a94dc9a641b8e9b4d46ddb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1a3a576c4cdd48d780b279d34a8cd222, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/8dfac8a07b574261ac5f8947f16b976d] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=33.6 K 2024-12-04T15:22:09,883 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] 
mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:09,883 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a31ddccb1cd040abaf3061820278012f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/48cbcb656e39452faef886ea907b68b1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/0a2e507357f14f9d81d8ac49719a626f] 2024-12-04T15:22:09,883 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting a8580831c3a94dc9a641b8e9b4d46ddb, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733325724858 2024-12-04T15:22:09,883 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a31ddccb1cd040abaf3061820278012f, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733325724858 2024-12-04T15:22:09,884 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a3a576c4cdd48d780b279d34a8cd222, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733325725823 2024-12-04T15:22:09,884 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48cbcb656e39452faef886ea907b68b1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733325725774 2024-12-04T15:22:09,884 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 8dfac8a07b574261ac5f8947f16b976d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733325727145 2024-12-04T15:22:09,884 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a2e507357f14f9d81d8ac49719a626f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733325727145 2024-12-04T15:22:09,898 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:09,899 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-04T15:22:09,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:09,899 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-04T15:22:09,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:22:09,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:09,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:22:09,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:09,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:22:09,900 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#B#compaction#169 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:09,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:09,901 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/046da8b285294b81acd748c92801df40 is 50, key is test_row_0/B:col10/1733325727150/Put/seqid=0 2024-12-04T15:22:09,912 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:09,932 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412046c70fabb534448bfa515b5b9a5a28d29_3bb1bcca60c6b0bcd1824e7ad9c8f501 store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:09,934 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412046c70fabb534448bfa515b5b9a5a28d29_3bb1bcca60c6b0bcd1824e7ad9c8f501, store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:09,934 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412046c70fabb534448bfa515b5b9a5a28d29_3bb1bcca60c6b0bcd1824e7ad9c8f501 
because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:09,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412044e0ca7f4666d41fba9b549a526053d6d_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325728329/Put/seqid=0 2024-12-04T15:22:09,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742024_1200 (size=12595) 2024-12-04T15:22:10,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742026_1202 (size=12304) 2024-12-04T15:22:10,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:10,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742025_1201 (size=4469) 2024-12-04T15:22:10,011 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#A#compaction#170 average throughput is 0.25 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:10,012 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/03ac288541be4f01ad67b4c69deda3a4 is 175, key is test_row_0/A:col10/1733325727150/Put/seqid=0 2024-12-04T15:22:10,016 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412044e0ca7f4666d41fba9b549a526053d6d_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412044e0ca7f4666d41fba9b549a526053d6d_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:10,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/ddac2f67860c4358a787fe0178533f1f, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:10,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/ddac2f67860c4358a787fe0178533f1f is 175, key is 
test_row_0/A:col10/1733325728329/Put/seqid=0 2024-12-04T15:22:10,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742027_1203 (size=31549) 2024-12-04T15:22:10,031 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/03ac288541be4f01ad67b4c69deda3a4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/03ac288541be4f01ad67b4c69deda3a4 2024-12-04T15:22:10,049 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/A of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into 03ac288541be4f01ad67b4c69deda3a4(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:10,049 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:10,049 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/A, priority=13, startTime=1733325729877; duration=0sec 2024-12-04T15:22:10,049 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:10,049 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:A 2024-12-04T15:22:10,050 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:10,052 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:10,052 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/C is initiating minor compaction (all files) 2024-12-04T15:22:10,052 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/C in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:10,052 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/77088d9e748843c18859bed8c017819a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/9cdba9f2258a47e78524694ddb125b7f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/11365962712042138458fe09e1cf12ee] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=33.6 K 2024-12-04T15:22:10,053 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77088d9e748843c18859bed8c017819a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733325724858 2024-12-04T15:22:10,059 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cdba9f2258a47e78524694ddb125b7f, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733325725823 2024-12-04T15:22:10,060 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 11365962712042138458fe09e1cf12ee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733325727145 2024-12-04T15:22:10,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742028_1204 (size=31105) 2024-12-04T15:22:10,077 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/ddac2f67860c4358a787fe0178533f1f 2024-12-04T15:22:10,098 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#C#compaction#172 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:10,098 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/1a47af261c7b4657be557066d6800300 is 50, key is test_row_0/C:col10/1733325727150/Put/seqid=0 2024-12-04T15:22:10,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/ebf2b71d6fb84a11968676b56b38cae2 is 50, key is test_row_0/B:col10/1733325728329/Put/seqid=0 2024-12-04T15:22:10,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742029_1205 (size=12595) 2024-12-04T15:22:10,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742030_1206 (size=12151) 2024-12-04T15:22:10,179 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/ebf2b71d6fb84a11968676b56b38cae2 2024-12-04T15:22:10,216 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/1a47af261c7b4657be557066d6800300 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/1a47af261c7b4657be557066d6800300 2024-12-04T15:22:10,229 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/C of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into 1a47af261c7b4657be557066d6800300(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:10,229 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:10,229 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/C, priority=13, startTime=1733325729882; duration=0sec 2024-12-04T15:22:10,230 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:10,230 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:C 2024-12-04T15:22:10,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/3e97a03a3ed346b59783f0f07adbbab3 is 50, key is test_row_0/C:col10/1733325728329/Put/seqid=0 2024-12-04T15:22:10,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742031_1207 (size=12151) 2024-12-04T15:22:10,395 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/046da8b285294b81acd748c92801df40 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/046da8b285294b81acd748c92801df40 2024-12-04T15:22:10,408 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/B of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into 046da8b285294b81acd748c92801df40(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:10,408 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:10,412 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/B, priority=13, startTime=1733325729881; duration=0sec 2024-12-04T15:22:10,412 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:10,412 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:B 2024-12-04T15:22:10,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
as already flushing 2024-12-04T15:22:10,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:10,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:10,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325790528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:10,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:10,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325790532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:10,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:10,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325790535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:10,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:10,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325790536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:10,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:10,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325790536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:10,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:10,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325790644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:10,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:10,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325790648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:10,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:10,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325790647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:10,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:10,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325790652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:10,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:10,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325790656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:10,698 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/3e97a03a3ed346b59783f0f07adbbab3 2024-12-04T15:22:10,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/ddac2f67860c4358a787fe0178533f1f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/ddac2f67860c4358a787fe0178533f1f 2024-12-04T15:22:10,719 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/ddac2f67860c4358a787fe0178533f1f, entries=150, sequenceid=211, filesize=30.4 K 2024-12-04T15:22:10,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/ebf2b71d6fb84a11968676b56b38cae2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/ebf2b71d6fb84a11968676b56b38cae2 2024-12-04T15:22:10,726 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/ebf2b71d6fb84a11968676b56b38cae2, entries=150, sequenceid=211, filesize=11.9 K 2024-12-04T15:22:10,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/3e97a03a3ed346b59783f0f07adbbab3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/3e97a03a3ed346b59783f0f07adbbab3 2024-12-04T15:22:10,741 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/3e97a03a3ed346b59783f0f07adbbab3, entries=150, sequenceid=211, filesize=11.9 K 2024-12-04T15:22:10,742 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 843ms, sequenceid=211, compaction requested=false 2024-12-04T15:22:10,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:10,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:10,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-04T15:22:10,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-12-04T15:22:10,745 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-04T15:22:10,746 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.4340 sec 2024-12-04T15:22:10,747 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 3.4410 sec 2024-12-04T15:22:10,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:10,872 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-04T15:22:10,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:22:10,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:10,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:22:10,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:10,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:22:10,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:10,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:10,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325790882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:10,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:10,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325790886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:10,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:10,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325790891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:10,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:10,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325790891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:10,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:10,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325790884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:10,907 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204f9a8d40899f44f83bb7ba18c423053c2_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325730870/Put/seqid=0 2024-12-04T15:22:10,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742032_1208 (size=14794) 2024-12-04T15:22:10,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:10,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325790995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:10,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:11,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325790997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:11,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:11,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325790997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:11,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:11,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325790997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:11,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:11,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325791006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:11,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:11,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:11,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325791208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:11,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:11,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325791208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:11,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:11,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325791208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:11,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:11,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325791211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:11,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325791208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:11,363 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:11,391 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204f9a8d40899f44f83bb7ba18c423053c2_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204f9a8d40899f44f83bb7ba18c423053c2_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:11,401 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/562677faf213454c932e2d762be7393d, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:11,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/562677faf213454c932e2d762be7393d is 175, key is test_row_0/A:col10/1733325730870/Put/seqid=0 2024-12-04T15:22:11,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742033_1209 (size=39749) 2024-12-04T15:22:11,445 INFO [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=243, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/562677faf213454c932e2d762be7393d 2024-12-04T15:22:11,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-04T15:22:11,453 INFO [Thread-721 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-04T15:22:11,488 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:22:11,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-04T15:22:11,493 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:22:11,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-04T15:22:11,494 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:22:11,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:22:11,495 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/753fe9f1f3664c95bffee6a1e4fca4b2 is 50, key is test_row_0/B:col10/1733325730870/Put/seqid=0 2024-12-04T15:22:11,526 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:11,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325791524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:11,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:11,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325791524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:11,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:11,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325791525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:11,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:11,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325791525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:11,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:11,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325791537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:11,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742034_1210 (size=12151) 2024-12-04T15:22:11,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-04T15:22:11,654 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:11,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-04T15:22:11,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:11,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:11,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:11,655 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:11,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:11,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:11,662 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33167 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=645c2dbfef2e,42169,1733325683856, table=TestAcidGuarantees, region=3bb1bcca60c6b0bcd1824e7ad9c8f501. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-04T15:22:11,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-04T15:22:11,808 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:11,816 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-04T15:22:11,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:11,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:11,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:11,816 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:11,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:11,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:11,946 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/753fe9f1f3664c95bffee6a1e4fca4b2 2024-12-04T15:22:11,961 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/275fb76d68ec4ebdb2656a854ce45926 is 50, key is test_row_0/C:col10/1733325730870/Put/seqid=0 2024-12-04T15:22:11,969 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:11,970 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-04T15:22:11,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:11,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:11,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:11,970 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:11,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:11,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:11,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742035_1211 (size=12151) 2024-12-04T15:22:11,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/275fb76d68ec4ebdb2656a854ce45926 2024-12-04T15:22:12,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/562677faf213454c932e2d762be7393d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/562677faf213454c932e2d762be7393d 2024-12-04T15:22:12,008 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/562677faf213454c932e2d762be7393d, entries=200, sequenceid=243, filesize=38.8 K 2024-12-04T15:22:12,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/753fe9f1f3664c95bffee6a1e4fca4b2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/753fe9f1f3664c95bffee6a1e4fca4b2 2024-12-04T15:22:12,025 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/753fe9f1f3664c95bffee6a1e4fca4b2, entries=150, sequenceid=243, filesize=11.9 K 2024-12-04T15:22:12,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/275fb76d68ec4ebdb2656a854ce45926 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/275fb76d68ec4ebdb2656a854ce45926 2024-12-04T15:22:12,034 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/275fb76d68ec4ebdb2656a854ce45926, entries=150, sequenceid=243, filesize=11.9 K 2024-12-04T15:22:12,034 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 1162ms, sequenceid=243, compaction requested=true 2024-12-04T15:22:12,034 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:12,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:12,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:12,035 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:12,035 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:12,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:12,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:12,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:12,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:12,036 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102403 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:12,036 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/A is initiating minor 
compaction (all files) 2024-12-04T15:22:12,036 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/A in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:12,036 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/03ac288541be4f01ad67b4c69deda3a4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/ddac2f67860c4358a787fe0178533f1f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/562677faf213454c932e2d762be7393d] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=100.0 K 2024-12-04T15:22:12,036 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:12,036 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/03ac288541be4f01ad67b4c69deda3a4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/ddac2f67860c4358a787fe0178533f1f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/562677faf213454c932e2d762be7393d] 2024-12-04T15:22:12,036 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:12,037 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/B is initiating minor compaction (all files) 2024-12-04T15:22:12,037 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/B in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:12,037 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/046da8b285294b81acd748c92801df40, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/ebf2b71d6fb84a11968676b56b38cae2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/753fe9f1f3664c95bffee6a1e4fca4b2] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=36.0 K 2024-12-04T15:22:12,037 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03ac288541be4f01ad67b4c69deda3a4, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733325727145 2024-12-04T15:22:12,037 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 046da8b285294b81acd748c92801df40, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733325727145 2024-12-04T15:22:12,038 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ddac2f67860c4358a787fe0178533f1f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733325728313 2024-12-04T15:22:12,040 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting ebf2b71d6fb84a11968676b56b38cae2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733325728313 2024-12-04T15:22:12,040 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 562677faf213454c932e2d762be7393d, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1733325730868 2024-12-04T15:22:12,041 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 753fe9f1f3664c95bffee6a1e4fca4b2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1733325730868 2024-12-04T15:22:12,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:12,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-04T15:22:12,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:22:12,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:12,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:22:12,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:12,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 
2024-12-04T15:22:12,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:12,068 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:12,073 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#B#compaction#179 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:12,074 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/be4f552434bb47ff9b3aca06e8264e7e is 50, key is test_row_0/B:col10/1733325730870/Put/seqid=0 2024-12-04T15:22:12,095 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241204d6af2392462a4c549fcc5822d84355a3_3bb1bcca60c6b0bcd1824e7ad9c8f501 store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:12,098 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241204d6af2392462a4c549fcc5822d84355a3_3bb1bcca60c6b0bcd1824e7ad9c8f501, store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:12,098 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204d6af2392462a4c549fcc5822d84355a3_3bb1bcca60c6b0bcd1824e7ad9c8f501 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:12,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325792096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325792096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,107 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120451a955ad2fc3406eaaad9d20e257bc6d_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325732052/Put/seqid=0 2024-12-04T15:22:12,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-04T15:22:12,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325792107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325792107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325792111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,123 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-04T15:22:12,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:12,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:12,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:12,124 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:12,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:12,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:12,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742036_1212 (size=12697) 2024-12-04T15:22:12,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742037_1213 (size=4469) 2024-12-04T15:22:12,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742038_1214 (size=12304) 2024-12-04T15:22:12,165 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#A#compaction#178 average throughput is 0.25 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:12,166 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/f1678f2252bc42c6bcd2eaf71ad14942 is 175, key is test_row_0/A:col10/1733325730870/Put/seqid=0 2024-12-04T15:22:12,166 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:12,172 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120451a955ad2fc3406eaaad9d20e257bc6d_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120451a955ad2fc3406eaaad9d20e257bc6d_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:12,173 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/6a5a6ea61807411fbd545ba9b4313e07, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:12,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/6a5a6ea61807411fbd545ba9b4313e07 is 175, key is test_row_0/A:col10/1733325732052/Put/seqid=0 2024-12-04T15:22:12,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35183 is added to blk_1073742039_1215 (size=31651) 2024-12-04T15:22:12,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742040_1216 (size=31105) 2024-12-04T15:22:12,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325792212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325792212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,215 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=255, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/6a5a6ea61807411fbd545ba9b4313e07 2024-12-04T15:22:12,224 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/22402b9300084cb3bdedb4fcb8c24171 is 50, key is test_row_0/B:col10/1733325732052/Put/seqid=0 2024-12-04T15:22:12,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325792221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325792225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325792229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742041_1217 (size=12151) 2024-12-04T15:22:12,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/22402b9300084cb3bdedb4fcb8c24171 2024-12-04T15:22:12,259 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/bf7224207ea542cb800f7e41da5e2ceb is 50, key is test_row_0/C:col10/1733325732052/Put/seqid=0 2024-12-04T15:22:12,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742042_1218 (size=12151) 2024-12-04T15:22:12,267 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/bf7224207ea542cb800f7e41da5e2ceb 2024-12-04T15:22:12,286 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,286 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-04T15:22:12,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:12,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:12,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:12,287 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:12,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:12,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/6a5a6ea61807411fbd545ba9b4313e07 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/6a5a6ea61807411fbd545ba9b4313e07 2024-12-04T15:22:12,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:12,294 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/6a5a6ea61807411fbd545ba9b4313e07, entries=150, sequenceid=255, filesize=30.4 K 2024-12-04T15:22:12,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/22402b9300084cb3bdedb4fcb8c24171 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/22402b9300084cb3bdedb4fcb8c24171 2024-12-04T15:22:12,308 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/22402b9300084cb3bdedb4fcb8c24171, entries=150, sequenceid=255, filesize=11.9 K 2024-12-04T15:22:12,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/bf7224207ea542cb800f7e41da5e2ceb as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/bf7224207ea542cb800f7e41da5e2ceb 2024-12-04T15:22:12,319 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/bf7224207ea542cb800f7e41da5e2ceb, entries=150, sequenceid=255, filesize=11.9 K 2024-12-04T15:22:12,320 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 266ms, sequenceid=255, compaction requested=true 2024-12-04T15:22:12,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:12,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:A, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:12,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:22:12,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:B, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:12,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-04T15:22:12,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:12,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-04T15:22:12,423 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-04T15:22:12,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:12,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:22:12,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:12,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:22:12,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:12,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:22:12,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:12,443 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-04T15:22:12,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:12,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:12,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:12,445 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:12,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:12,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:12,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325792436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325792437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,455 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120405480687f22541c68e205c5c210b2eb9_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325732085/Put/seqid=0 2024-12-04T15:22:12,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325792448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325792449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325792452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742043_1219 (size=12454) 2024-12-04T15:22:12,519 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:12,537 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120405480687f22541c68e205c5c210b2eb9_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120405480687f22541c68e205c5c210b2eb9_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:12,540 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/cc03b8830d5646ad95cd89873f3e8537, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:12,540 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/cc03b8830d5646ad95cd89873f3e8537 is 175, key is test_row_0/A:col10/1733325732085/Put/seqid=0 2024-12-04T15:22:12,542 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/be4f552434bb47ff9b3aca06e8264e7e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/be4f552434bb47ff9b3aca06e8264e7e 2024-12-04T15:22:12,548 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/B of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into be4f552434bb47ff9b3aca06e8264e7e(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:12,548 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:12,548 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/B, priority=13, startTime=1733325732035; duration=0sec 2024-12-04T15:22:12,548 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-04T15:22:12,548 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:B 2024-12-04T15:22:12,548 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 3 compacting, 1 eligible, 16 blocking 2024-12-04T15:22:12,549 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-04T15:22:12,549 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-04T15:22:12,549 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. because compaction request was cancelled 2024-12-04T15:22:12,550 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:A 2024-12-04T15:22:12,550 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:22:12,551 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:22:12,551 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/C is initiating minor compaction (all files) 2024-12-04T15:22:12,551 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/C in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
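The selection messages above ("Selecting compaction from 4 store files, ... 16 blocking"; "Need 3 to initiate") are driven by two store-level thresholds. A short sketch of where those numbers come from; these are the stock HBase keys, and 3 and 16 are their usual defaults, consistent with the figures logged here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionConfig {
    public static Configuration selectionDefaults() {
        Configuration conf = HBaseConfiguration.create();
        // "Need 3 to initiate": minimum number of eligible store files before a
        // minor compaction is selected.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // "16 blocking": once a store holds this many files, further flushes are
        // delayed until compaction catches up.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        return conf;
    }
}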
2024-12-04T15:22:12,551 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/1a47af261c7b4657be557066d6800300, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/3e97a03a3ed346b59783f0f07adbbab3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/275fb76d68ec4ebdb2656a854ce45926, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/bf7224207ea542cb800f7e41da5e2ceb] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=47.9 K 2024-12-04T15:22:12,552 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a47af261c7b4657be557066d6800300, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733325727145 2024-12-04T15:22:12,552 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e97a03a3ed346b59783f0f07adbbab3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733325728313 2024-12-04T15:22:12,552 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 275fb76d68ec4ebdb2656a854ce45926, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1733325730868 2024-12-04T15:22:12,553 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting bf7224207ea542cb800f7e41da5e2ceb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733325732051 2024-12-04T15:22:12,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325792550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325792564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325792565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,575 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#C#compaction#184 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:12,576 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/7e072375a6904f03b90397aa0f7a1b42 is 50, key is test_row_0/C:col10/1733325732052/Put/seqid=0 2024-12-04T15:22:12,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325792565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,605 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-04T15:22:12,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742044_1220 (size=31255) 2024-12-04T15:22:12,607 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=282, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/cc03b8830d5646ad95cd89873f3e8537 2024-12-04T15:22:12,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:12,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:12,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:12,612 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:12,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:12,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-04T15:22:12,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:12,619 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/f1678f2252bc42c6bcd2eaf71ad14942 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f1678f2252bc42c6bcd2eaf71ad14942 2024-12-04T15:22:12,626 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/A of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into f1678f2252bc42c6bcd2eaf71ad14942(size=30.9 K), total size for store is 61.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
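The PressureAwareThroughputController line further up reports "average throughput is 6.55 MB/second ... total limit is 50.00 MB/second". Under that controller the effective compaction throughput limit is interpolated between a lower and a higher bound according to compaction pressure, so with zero pressure the limit sits at the lower bound. A hedged sketch of the relevant knobs; the key names are the ones used by the pressure-aware controller, and the byte values simply mirror the 50 MB/s limit reported in this log plus an assumed 100 MB/s upper bound:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputConfig {
    public static Configuration throughputBounds() {
        Configuration conf = HBaseConfiguration.create();
        // With no compaction pressure the controller throttles at the lower
        // bound, matching the "total limit is 50.00 MB/second" logged above.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        // As pressure rises the limit is raised toward the higher bound
        // (assumed here to be 100 MB/s).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        return conf;
    }
}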
2024-12-04T15:22:12,626 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:12,626 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/A, priority=13, startTime=1733325732034; duration=0sec 2024-12-04T15:22:12,626 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:22:12,626 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:A 2024-12-04T15:22:12,626 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:B 2024-12-04T15:22:12,626 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 4 compacting, 0 eligible, 16 blocking 2024-12-04T15:22:12,626 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-04T15:22:12,626 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-04T15:22:12,626 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. because compaction request was cancelled 2024-12-04T15:22:12,626 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:C 2024-12-04T15:22:12,626 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-04T15:22:12,627 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-04T15:22:12,628 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-04T15:22:12,628 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
because compaction request was cancelled 2024-12-04T15:22:12,628 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:B 2024-12-04T15:22:12,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742045_1221 (size=12731) 2024-12-04T15:22:12,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/beb9e8241d7f4142af8a2a0dd5167ef2 is 50, key is test_row_0/B:col10/1733325732085/Put/seqid=0 2024-12-04T15:22:12,656 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/7e072375a6904f03b90397aa0f7a1b42 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/7e072375a6904f03b90397aa0f7a1b42 2024-12-04T15:22:12,670 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/C of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into 7e072375a6904f03b90397aa0f7a1b42(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:12,671 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:12,671 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/C, priority=12, startTime=1733325732320; duration=0sec 2024-12-04T15:22:12,671 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:12,671 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:C 2024-12-04T15:22:12,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742046_1222 (size=12301) 2024-12-04T15:22:12,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325792752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325792757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,770 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,771 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-04T15:22:12,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:12,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:12,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:12,771 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
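The pid=54 entries above repeat a consistent cycle: the master dispatches FlushRegionCallable to the region server, the region answers "NOT flushing ... as already flushing", the callable fails with "Unable to complete flush", and the master records the remote procedure as failed and dispatches it again. From application code the analogous situation is normally handled by simply retrying the flush request. A hypothetical helper using the Admin API; the table name is taken from this log, while the retry count and backoff are invented for illustration:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushWithRetry {
    // Hypothetical helper: request a flush of TestAcidGuarantees and retry a few
    // times if the request fails (for example because a flush is already running).
    public static void flushTestTable() throws IOException, InterruptedException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName name = TableName.valueOf("TestAcidGuarantees");
            IOException last = null;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    admin.flush(name);
                    return;
                } catch (IOException e) {
                    last = e;                     // e.g. "Unable to complete flush"
                    Thread.sleep(200L * attempt); // back off before asking again
                }
            }
            throw last;
        }
    }
}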
2024-12-04T15:22:12,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:12,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:12,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325792772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325792781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:12,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325792784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,930 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:12,931 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-04T15:22:12,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
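Meanwhile the client-side Mutate calls keep receiving RegionTooBusyException (callId 140, 142, 135 and so on above). The stock HBase client treats this as a retriable error and backs off on its own before eventually surfacing an IOException to the caller; a hypothetical application-level loop layered on top of that could look like the sketch below. The row, family and qualifier mirror the ones in this log, but the retry policy is invented:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
    // Hypothetical writer: retries a single Put with linear backoff after the
    // client's own retries have been exhausted by a busy region.
    public static void writeWithBackoff(byte[] value) throws IOException, InterruptedException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);   // the client itself retries before throwing
                    return;
                } catch (IOException busy) {
                    if (attempt >= 5) {
                        throw busy;   // give up after a few application-level rounds
                    }
                    Thread.sleep(100L * attempt);
                }
            }
        }
    }
}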
2024-12-04T15:22:12,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:12,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:12,931 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:12,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:12,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:13,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:13,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325793062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:13,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:13,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325793077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:13,088 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:13,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-04T15:22:13,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:13,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:13,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:13,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:13,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:13,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:13,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325793089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:13,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:13,102 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/beb9e8241d7f4142af8a2a0dd5167ef2 2024-12-04T15:22:13,109 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:13,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325793108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:13,144 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/142b47cab3ad4d64846dba6c5b45a611 is 50, key is test_row_0/C:col10/1733325732085/Put/seqid=0 2024-12-04T15:22:13,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742047_1223 (size=12301) 2024-12-04T15:22:13,178 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/142b47cab3ad4d64846dba6c5b45a611 2024-12-04T15:22:13,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/cc03b8830d5646ad95cd89873f3e8537 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/cc03b8830d5646ad95cd89873f3e8537 2024-12-04T15:22:13,194 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/cc03b8830d5646ad95cd89873f3e8537, entries=150, sequenceid=282, filesize=30.5 K 2024-12-04T15:22:13,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/beb9e8241d7f4142af8a2a0dd5167ef2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/beb9e8241d7f4142af8a2a0dd5167ef2 2024-12-04T15:22:13,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/beb9e8241d7f4142af8a2a0dd5167ef2, entries=150, sequenceid=282, filesize=12.0 K 2024-12-04T15:22:13,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/142b47cab3ad4d64846dba6c5b45a611 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/142b47cab3ad4d64846dba6c5b45a611 2024-12-04T15:22:13,241 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:13,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-04T15:22:13,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:13,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:13,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:13,242 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:13,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:13,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:13,246 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/142b47cab3ad4d64846dba6c5b45a611, entries=150, sequenceid=282, filesize=12.0 K 2024-12-04T15:22:13,250 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 827ms, sequenceid=282, compaction requested=true 2024-12-04T15:22:13,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:13,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:13,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:13,250 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:13,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:13,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:13,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:13,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:22:13,251 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:13,251 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94011 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:13,252 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/A is initiating minor compaction (all files) 2024-12-04T15:22:13,252 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/A in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:13,252 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f1678f2252bc42c6bcd2eaf71ad14942, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/6a5a6ea61807411fbd545ba9b4313e07, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/cc03b8830d5646ad95cd89873f3e8537] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=91.8 K 2024-12-04T15:22:13,252 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:13,252 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f1678f2252bc42c6bcd2eaf71ad14942, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/6a5a6ea61807411fbd545ba9b4313e07, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/cc03b8830d5646ad95cd89873f3e8537] 2024-12-04T15:22:13,253 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting f1678f2252bc42c6bcd2eaf71ad14942, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1733325730868 2024-12-04T15:22:13,253 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:13,253 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/B is initiating minor compaction (all files) 2024-12-04T15:22:13,253 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/B in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:13,253 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/be4f552434bb47ff9b3aca06e8264e7e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/22402b9300084cb3bdedb4fcb8c24171, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/beb9e8241d7f4142af8a2a0dd5167ef2] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=36.3 K 2024-12-04T15:22:13,254 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a5a6ea61807411fbd545ba9b4313e07, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733325732051 2024-12-04T15:22:13,254 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting be4f552434bb47ff9b3aca06e8264e7e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1733325730868 2024-12-04T15:22:13,254 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting cc03b8830d5646ad95cd89873f3e8537, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1733325732085 2024-12-04T15:22:13,255 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22402b9300084cb3bdedb4fcb8c24171, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733325732051 2024-12-04T15:22:13,256 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting beb9e8241d7f4142af8a2a0dd5167ef2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1733325732085 2024-12-04T15:22:13,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:13,265 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-04T15:22:13,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:22:13,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:13,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:22:13,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:13,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:22:13,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:13,274 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false 
optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:13,291 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#B#compaction#188 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:13,292 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/48b82d7248f14b838d3887c96a3f39f9 is 50, key is test_row_0/B:col10/1733325732085/Put/seqid=0 2024-12-04T15:22:13,300 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412042d2e1741b4c04ff6ab9a817f8cb04ed4_3bb1bcca60c6b0bcd1824e7ad9c8f501 store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:13,302 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412042d2e1741b4c04ff6ab9a817f8cb04ed4_3bb1bcca60c6b0bcd1824e7ad9c8f501, store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:13,303 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412042d2e1741b4c04ff6ab9a817f8cb04ed4_3bb1bcca60c6b0bcd1824e7ad9c8f501 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:13,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742048_1224 (size=12949) 2024-12-04T15:22:13,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742049_1225 (size=4469) 2024-12-04T15:22:13,395 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412046a6ab3070d8346a198b7e775cedbb98c_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325732442/Put/seqid=0 2024-12-04T15:22:13,399 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#A#compaction#187 average throughput is 0.20 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:13,400 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/846c22f4fdf44645833b39f54adf2dd4 is 175, key is test_row_0/A:col10/1733325732085/Put/seqid=0 2024-12-04T15:22:13,401 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:13,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-04T15:22:13,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:13,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:13,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:13,403 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:13,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:13,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:13,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:13,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325793426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:13,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742050_1226 (size=14994) 2024-12-04T15:22:13,452 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:13,463 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412046a6ab3070d8346a198b7e775cedbb98c_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412046a6ab3070d8346a198b7e775cedbb98c_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:13,464 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/736d06bbe0b54171ac9f81922182f8f9, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:13,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/736d06bbe0b54171ac9f81922182f8f9 is 175, key is test_row_0/A:col10/1733325732442/Put/seqid=0 2024-12-04T15:22:13,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 
is added to blk_1073742051_1227 (size=31903) 2024-12-04T15:22:13,487 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/846c22f4fdf44645833b39f54adf2dd4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/846c22f4fdf44645833b39f54adf2dd4 2024-12-04T15:22:13,495 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/A of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into 846c22f4fdf44645833b39f54adf2dd4(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:13,495 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:13,495 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/A, priority=13, startTime=1733325733250; duration=0sec 2024-12-04T15:22:13,495 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:13,496 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:A 2024-12-04T15:22:13,496 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-04T15:22:13,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742052_1228 (size=39949) 2024-12-04T15:22:13,498 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-04T15:22:13,498 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-04T15:22:13,498 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
because compaction request was cancelled 2024-12-04T15:22:13,498 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:C 2024-12-04T15:22:13,499 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=296, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/736d06bbe0b54171ac9f81922182f8f9 2024-12-04T15:22:13,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/e737c0de24ec454d95409e209de894e5 is 50, key is test_row_0/B:col10/1733325732442/Put/seqid=0 2024-12-04T15:22:13,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:13,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325793540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:13,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742053_1229 (size=12301) 2024-12-04T15:22:13,554 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:13,555 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-04T15:22:13,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:13,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:13,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:13,556 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:13,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:13,559 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/e737c0de24ec454d95409e209de894e5 2024-12-04T15:22:13,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:13,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:13,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325793568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:13,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:13,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325793581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:13,588 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/af8e9913161248b3a81ae17bd11e9c10 is 50, key is test_row_0/C:col10/1733325732442/Put/seqid=0 2024-12-04T15:22:13,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:13,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325793594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:13,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-04T15:22:13,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:13,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325793614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:13,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742054_1230 (size=12301) 2024-12-04T15:22:13,711 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:13,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-04T15:22:13,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:13,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:13,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:13,712 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:13,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:13,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:13,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:13,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325793745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:13,796 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/48b82d7248f14b838d3887c96a3f39f9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/48b82d7248f14b838d3887c96a3f39f9 2024-12-04T15:22:13,816 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/B of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into 48b82d7248f14b838d3887c96a3f39f9(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:13,816 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:13,816 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/B, priority=13, startTime=1733325733250; duration=0sec 2024-12-04T15:22:13,816 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:13,816 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:B 2024-12-04T15:22:13,875 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:13,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-04T15:22:13,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:13,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:13,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:13,879 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:13,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:13,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:14,025 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/af8e9913161248b3a81ae17bd11e9c10 2024-12-04T15:22:14,036 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:14,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-04T15:22:14,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:14,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:14,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:14,040 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:14,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:14,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:14,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:14,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325794052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:14,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/736d06bbe0b54171ac9f81922182f8f9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/736d06bbe0b54171ac9f81922182f8f9 2024-12-04T15:22:14,059 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/736d06bbe0b54171ac9f81922182f8f9, entries=200, sequenceid=296, filesize=39.0 K 2024-12-04T15:22:14,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/e737c0de24ec454d95409e209de894e5 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/e737c0de24ec454d95409e209de894e5 2024-12-04T15:22:14,065 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/e737c0de24ec454d95409e209de894e5, entries=150, sequenceid=296, filesize=12.0 K 2024-12-04T15:22:14,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/af8e9913161248b3a81ae17bd11e9c10 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/af8e9913161248b3a81ae17bd11e9c10 2024-12-04T15:22:14,073 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/af8e9913161248b3a81ae17bd11e9c10, entries=150, sequenceid=296, filesize=12.0 K 2024-12-04T15:22:14,074 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 809ms, sequenceid=296, compaction requested=true 2024-12-04T15:22:14,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:14,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:14,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:14,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:14,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:14,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:14,074 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-04T15:22:14,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:14,074 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-04T15:22:14,075 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-04T15:22:14,075 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-04T15:22:14,075 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
because compaction request was cancelled 2024-12-04T15:22:14,075 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:B 2024-12-04T15:22:14,075 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:14,075 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-04T15:22:14,075 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-04T15:22:14,075 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. because compaction request was cancelled 2024-12-04T15:22:14,075 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:A 2024-12-04T15:22:14,076 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:14,077 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/C is initiating minor compaction (all files) 2024-12-04T15:22:14,077 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/C in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:14,077 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/7e072375a6904f03b90397aa0f7a1b42, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/142b47cab3ad4d64846dba6c5b45a611, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/af8e9913161248b3a81ae17bd11e9c10] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=36.5 K 2024-12-04T15:22:14,077 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e072375a6904f03b90397aa0f7a1b42, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733325732051 2024-12-04T15:22:14,078 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 142b47cab3ad4d64846dba6c5b45a611, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1733325732085 2024-12-04T15:22:14,078 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting af8e9913161248b3a81ae17bd11e9c10, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733325732442 2024-12-04T15:22:14,086 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#C#compaction#192 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:14,087 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/953248d8a7ce4bf4a38a9deac2c881f3 is 50, key is test_row_0/C:col10/1733325732442/Put/seqid=0 2024-12-04T15:22:14,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742055_1231 (size=12983) 2024-12-04T15:22:14,106 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/953248d8a7ce4bf4a38a9deac2c881f3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/953248d8a7ce4bf4a38a9deac2c881f3 2024-12-04T15:22:14,112 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/C of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into 953248d8a7ce4bf4a38a9deac2c881f3(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:14,112 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:14,112 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/C, priority=13, startTime=1733325734074; duration=0sec 2024-12-04T15:22:14,113 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:14,113 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:C 2024-12-04T15:22:14,193 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:14,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-04T15:22:14,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:14,196 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-04T15:22:14,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:22:14,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:14,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:22:14,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:14,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:22:14,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:14,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204e4d492a9fad642629b7c9a6c0febedd7_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325733407/Put/seqid=0 2024-12-04T15:22:14,231 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742056_1232 (size=12454) 2024-12-04T15:22:14,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:14,242 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204e4d492a9fad642629b7c9a6c0febedd7_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204e4d492a9fad642629b7c9a6c0febedd7_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:14,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/59428fedb0f3480d91d3c179f013e6df, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:14,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/59428fedb0f3480d91d3c179f013e6df is 175, key is test_row_0/A:col10/1733325733407/Put/seqid=0 2024-12-04T15:22:14,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742057_1233 (size=31255) 2024-12-04T15:22:14,289 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=324, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/59428fedb0f3480d91d3c179f013e6df 2024-12-04T15:22:14,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/4b9c506ba33e40789932ae1fcde1c5e8 is 50, key is test_row_0/B:col10/1733325733407/Put/seqid=0 2024-12-04T15:22:14,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742058_1234 (size=12301) 2024-12-04T15:22:14,310 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/4b9c506ba33e40789932ae1fcde1c5e8 
2024-12-04T15:22:14,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/8388f73245a149598fbdd83e49287066 is 50, key is test_row_0/C:col10/1733325733407/Put/seqid=0 2024-12-04T15:22:14,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742059_1235 (size=12301) 2024-12-04T15:22:14,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:14,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:14,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:14,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325794585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:14,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:14,591 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:14,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325794587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:14,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325794588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:14,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:14,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325794613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:14,629 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:14,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325794628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:14,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:14,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325794689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:14,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:14,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325794693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:14,735 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/8388f73245a149598fbdd83e49287066 2024-12-04T15:22:14,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/59428fedb0f3480d91d3c179f013e6df as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/59428fedb0f3480d91d3c179f013e6df 2024-12-04T15:22:14,751 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/59428fedb0f3480d91d3c179f013e6df, entries=150, sequenceid=324, filesize=30.5 K 2024-12-04T15:22:14,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/4b9c506ba33e40789932ae1fcde1c5e8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/4b9c506ba33e40789932ae1fcde1c5e8 2024-12-04T15:22:14,760 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/4b9c506ba33e40789932ae1fcde1c5e8, entries=150, sequenceid=324, filesize=12.0 K 2024-12-04T15:22:14,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/8388f73245a149598fbdd83e49287066 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/8388f73245a149598fbdd83e49287066 2024-12-04T15:22:14,767 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/8388f73245a149598fbdd83e49287066, entries=150, sequenceid=324, filesize=12.0 K 2024-12-04T15:22:14,768 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 572ms, sequenceid=324, compaction requested=true 2024-12-04T15:22:14,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:14,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:14,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-04T15:22:14,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-04T15:22:14,772 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-04T15:22:14,772 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.2770 sec 2024-12-04T15:22:14,774 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 3.2850 sec 2024-12-04T15:22:14,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:14,896 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-04T15:22:14,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:22:14,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:14,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:22:14,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:14,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:22:14,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:14,910 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204fbcb6d9ca58e479a857d2479d9fbc1b4_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325734579/Put/seqid=0 2024-12-04T15:22:14,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742060_1236 (size=14994) 2024-12-04T15:22:14,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:14,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325794953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:14,956 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:14,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325794956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:15,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:15,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325795054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:15,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:15,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325795060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:15,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:15,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325795258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:15,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:15,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325795277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:15,335 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:15,354 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204fbcb6d9ca58e479a857d2479d9fbc1b4_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204fbcb6d9ca58e479a857d2479d9fbc1b4_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:15,361 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/9de306d3e1744a8f91c710fe621ac064, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:15,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/9de306d3e1744a8f91c710fe621ac064 is 175, key is test_row_0/A:col10/1733325734579/Put/seqid=0 2024-12-04T15:22:15,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742061_1237 (size=39949) 2024-12-04T15:22:15,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:15,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325795563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:15,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:15,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325795585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:15,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-04T15:22:15,629 INFO [Thread-721 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-04T15:22:15,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:22:15,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-12-04T15:22:15,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-04T15:22:15,660 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:22:15,661 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:22:15,661 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:22:15,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-04T15:22:15,793 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=337, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/9de306d3e1744a8f91c710fe621ac064 2024-12-04T15:22:15,813 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:15,814 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=56 2024-12-04T15:22:15,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:15,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:15,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:15,814 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:15,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:15,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:15,830 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/c8642302df9e4388bf289a046b7c8462 is 50, key is test_row_0/B:col10/1733325734579/Put/seqid=0 2024-12-04T15:22:15,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742062_1238 (size=12301) 2024-12-04T15:22:15,849 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/c8642302df9e4388bf289a046b7c8462 2024-12-04T15:22:15,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/01fb0578a2964ddda053d3803ffa88c1 is 50, key is test_row_0/C:col10/1733325734579/Put/seqid=0 2024-12-04T15:22:15,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742063_1239 (size=12301) 2024-12-04T15:22:15,900 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/01fb0578a2964ddda053d3803ffa88c1 2024-12-04T15:22:15,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/9de306d3e1744a8f91c710fe621ac064 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/9de306d3e1744a8f91c710fe621ac064 2024-12-04T15:22:15,940 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/9de306d3e1744a8f91c710fe621ac064, entries=200, sequenceid=337, filesize=39.0 K 2024-12-04T15:22:15,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/c8642302df9e4388bf289a046b7c8462 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/c8642302df9e4388bf289a046b7c8462 2024-12-04T15:22:15,952 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/c8642302df9e4388bf289a046b7c8462, entries=150, sequenceid=337, filesize=12.0 K 2024-12-04T15:22:15,954 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/01fb0578a2964ddda053d3803ffa88c1 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/01fb0578a2964ddda053d3803ffa88c1 2024-12-04T15:22:15,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-04T15:22:15,961 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/01fb0578a2964ddda053d3803ffa88c1, entries=150, sequenceid=337, filesize=12.0 K 2024-12-04T15:22:15,963 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 1067ms, sequenceid=337, compaction requested=true 2024-12-04T15:22:15,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:15,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:15,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:15,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:15,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:22:15,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:15,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-04T15:22:15,963 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:22:15,964 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:22:15,968 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:15,972 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-04T15:22:15,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:15,972 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-04T15:22:15,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:22:15,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:15,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:22:15,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:15,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:22:15,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:15,974 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:22:15,974 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/B is initiating minor compaction (all files) 2024-12-04T15:22:15,974 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/B in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:15,974 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/48b82d7248f14b838d3887c96a3f39f9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/e737c0de24ec454d95409e209de894e5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/4b9c506ba33e40789932ae1fcde1c5e8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/c8642302df9e4388bf289a046b7c8462] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=48.7 K 2024-12-04T15:22:15,974 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:22:15,975 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/A is initiating minor compaction (all files) 2024-12-04T15:22:15,975 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/A in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:15,975 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/846c22f4fdf44645833b39f54adf2dd4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/736d06bbe0b54171ac9f81922182f8f9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/59428fedb0f3480d91d3c179f013e6df, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/9de306d3e1744a8f91c710fe621ac064] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=139.7 K 2024-12-04T15:22:15,975 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:15,975 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/846c22f4fdf44645833b39f54adf2dd4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/736d06bbe0b54171ac9f81922182f8f9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/59428fedb0f3480d91d3c179f013e6df, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/9de306d3e1744a8f91c710fe621ac064] 2024-12-04T15:22:15,975 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 48b82d7248f14b838d3887c96a3f39f9, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1733325732085 2024-12-04T15:22:15,975 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 846c22f4fdf44645833b39f54adf2dd4, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1733325732085 2024-12-04T15:22:15,976 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting e737c0de24ec454d95409e209de894e5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733325732442 2024-12-04T15:22:15,976 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 736d06bbe0b54171ac9f81922182f8f9, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733325732433 2024-12-04T15:22:15,976 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b9c506ba33e40789932ae1fcde1c5e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733325733354 2024-12-04T15:22:15,976 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59428fedb0f3480d91d3c179f013e6df, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733325733354 2024-12-04T15:22:15,977 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting c8642302df9e4388bf289a046b7c8462, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733325734579 2024-12-04T15:22:15,977 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9de306d3e1744a8f91c710fe621ac064, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733325734579 2024-12-04T15:22:15,999 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#B#compaction#199 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:15,999 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/45e798bfd8954f8a994f8287c8dcae09 is 50, key is test_row_0/B:col10/1733325734579/Put/seqid=0 2024-12-04T15:22:16,000 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:16,012 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241204e68f85bbcf9c42b2b65e7670938f7ba5_3bb1bcca60c6b0bcd1824e7ad9c8f501 store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:16,015 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241204e68f85bbcf9c42b2b65e7670938f7ba5_3bb1bcca60c6b0bcd1824e7ad9c8f501, store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:16,015 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204e68f85bbcf9c42b2b65e7670938f7ba5_3bb1bcca60c6b0bcd1824e7ad9c8f501 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:16,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120415915519e94746e3ae255350cfe75f1c_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325734950/Put/seqid=0 2024-12-04T15:22:16,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742064_1240 (size=13085) 2024-12-04T15:22:16,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742065_1241 (size=4469) 2024-12-04T15:22:16,045 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#A#compaction#200 average throughput is 0.54 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:16,046 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/b9fae9b1d888405baf908946535bfbb3 is 175, key is test_row_0/A:col10/1733325734579/Put/seqid=0 2024-12-04T15:22:16,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742066_1242 (size=12454) 2024-12-04T15:22:16,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:16,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:16,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742067_1243 (size=32039) 2024-12-04T15:22:16,148 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/b9fae9b1d888405baf908946535bfbb3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/b9fae9b1d888405baf908946535bfbb3 2024-12-04T15:22:16,184 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/A of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into b9fae9b1d888405baf908946535bfbb3(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:16,184 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:16,184 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/A, priority=12, startTime=1733325735963; duration=0sec 2024-12-04T15:22:16,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:16,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325796178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:16,185 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:16,185 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:A 2024-12-04T15:22:16,185 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:16,193 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:16,193 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/C is initiating minor compaction (all files) 2024-12-04T15:22:16,193 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/C in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:16,193 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/953248d8a7ce4bf4a38a9deac2c881f3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/8388f73245a149598fbdd83e49287066, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/01fb0578a2964ddda053d3803ffa88c1] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=36.7 K 2024-12-04T15:22:16,194 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 953248d8a7ce4bf4a38a9deac2c881f3, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733325732442 2024-12-04T15:22:16,194 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8388f73245a149598fbdd83e49287066, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733325733354 2024-12-04T15:22:16,196 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01fb0578a2964ddda053d3803ffa88c1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733325734579 2024-12-04T15:22:16,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:16,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325796186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:16,219 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#C#compaction#202 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:16,219 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/fd337d93f97a4e489da9c9885f1e071c is 50, key is test_row_0/C:col10/1733325734579/Put/seqid=0 2024-12-04T15:22:16,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-04T15:22:16,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742068_1244 (size=13085) 2024-12-04T15:22:16,283 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/fd337d93f97a4e489da9c9885f1e071c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/fd337d93f97a4e489da9c9885f1e071c 2024-12-04T15:22:16,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:16,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325796287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:16,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:16,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325796302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:16,316 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/C of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into fd337d93f97a4e489da9c9885f1e071c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:16,316 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:16,316 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/C, priority=13, startTime=1733325735963; duration=0sec 2024-12-04T15:22:16,316 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:16,316 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:C 2024-12-04T15:22:16,442 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/45e798bfd8954f8a994f8287c8dcae09 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/45e798bfd8954f8a994f8287c8dcae09 2024-12-04T15:22:16,448 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/B of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into 45e798bfd8954f8a994f8287c8dcae09(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:16,448 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:16,448 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/B, priority=12, startTime=1733325735963; duration=0sec 2024-12-04T15:22:16,448 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:16,448 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:B 2024-12-04T15:22:16,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:16,461 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120415915519e94746e3ae255350cfe75f1c_3bb1bcca60c6b0bcd1824e7ad9c8f501 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120415915519e94746e3ae255350cfe75f1c_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:16,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/2c2d43485cfc454a86eab908fb1b814a, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:16,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/2c2d43485cfc454a86eab908fb1b814a is 175, key is test_row_0/A:col10/1733325734950/Put/seqid=0 2024-12-04T15:22:16,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742069_1245 (size=31255) 2024-12-04T15:22:16,475 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=360, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/2c2d43485cfc454a86eab908fb1b814a 2024-12-04T15:22:16,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:16,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325796495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:16,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/2cbf410d244149dc906e26c05935803d is 50, key is test_row_0/B:col10/1733325734950/Put/seqid=0 2024-12-04T15:22:16,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:16,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325796510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:16,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742070_1246 (size=12301) 2024-12-04T15:22:16,524 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/2cbf410d244149dc906e26c05935803d 2024-12-04T15:22:16,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/7b6fb085ae0f4b11bc52714359f12ce3 is 50, key is test_row_0/C:col10/1733325734950/Put/seqid=0 2024-12-04T15:22:16,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742071_1247 (size=12301) 2024-12-04T15:22:16,563 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/7b6fb085ae0f4b11bc52714359f12ce3 2024-12-04T15:22:16,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/2c2d43485cfc454a86eab908fb1b814a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/2c2d43485cfc454a86eab908fb1b814a 2024-12-04T15:22:16,598 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/2c2d43485cfc454a86eab908fb1b814a, entries=150, sequenceid=360, filesize=30.5 K 2024-12-04T15:22:16,600 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/2cbf410d244149dc906e26c05935803d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/2cbf410d244149dc906e26c05935803d 2024-12-04T15:22:16,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:16,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58526 deadline: 1733325796600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:16,606 DEBUG [Thread-711 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4157 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., hostname=645c2dbfef2e,42169,1733325683856, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:22:16,616 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/2cbf410d244149dc906e26c05935803d, entries=150, sequenceid=360, filesize=12.0 K 2024-12-04T15:22:16,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/7b6fb085ae0f4b11bc52714359f12ce3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/7b6fb085ae0f4b11bc52714359f12ce3 2024-12-04T15:22:16,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:16,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58556 deadline: 1733325796621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:16,626 DEBUG [Thread-713 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4178 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., hostname=645c2dbfef2e,42169,1733325683856, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:22:16,627 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/7b6fb085ae0f4b11bc52714359f12ce3, entries=150, sequenceid=360, filesize=12.0 K 2024-12-04T15:22:16,628 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 
3bb1bcca60c6b0bcd1824e7ad9c8f501 in 656ms, sequenceid=360, compaction requested=false 2024-12-04T15:22:16,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:16,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:16,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-12-04T15:22:16,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-12-04T15:22:16,631 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-04T15:22:16,631 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 968 msec 2024-12-04T15:22:16,633 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 988 msec 2024-12-04T15:22:16,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:16,646 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-04T15:22:16,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:22:16,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:16,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:22:16,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:16,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:22:16,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:16,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204698af062376c41d5a44465605a361aa2_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325736643/Put/seqid=0 2024-12-04T15:22:16,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742072_1248 (size=14994) 2024-12-04T15:22:16,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:16,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325796746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:16,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-04T15:22:16,764 INFO [Thread-721 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-04T15:22:16,766 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:22:16,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-12-04T15:22:16,770 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:22:16,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-04T15:22:16,771 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:22:16,771 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:22:16,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:16,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325796802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:16,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:16,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325796814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:16,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:16,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325796853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:16,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-04T15:22:16,906 DEBUG [Thread-724 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04d2c596 to 127.0.0.1:55739 2024-12-04T15:22:16,906 DEBUG [Thread-724 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:16,906 DEBUG [Thread-722 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x139ceb9b to 127.0.0.1:55739 2024-12-04T15:22:16,906 DEBUG [Thread-722 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:16,908 DEBUG [Thread-726 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x57ef66fa to 127.0.0.1:55739 2024-12-04T15:22:16,908 DEBUG [Thread-726 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:16,915 DEBUG [Thread-728 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x427f113e to 127.0.0.1:55739 2024-12-04T15:22:16,915 DEBUG [Thread-728 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:16,923 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:16,923 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-04T15:22:16,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:16,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:16,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:16,924 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:16,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:16,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:17,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325797060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:17,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-04T15:22:17,076 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:17,076 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-04T15:22:17,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:17,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:17,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:17,077 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
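The repeated pid=58 failures above come from FlushRegionCallable finding the region already mid-flush, while the RegionTooBusyException entries show client puts being rejected once the memstore passes its 512 K blocking limit. A minimal client-side sketch of coping with that rejection follows; the table, row, and column names are taken from the log, but the explicit retry loop and its parameters are illustrative only (the stock HBase client already retries RegionTooBusyException internally).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);          // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs); // back off and let the in-flight flush drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}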
2024-12-04T15:22:17,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,107 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:17,112 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204698af062376c41d5a44465605a361aa2_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204698af062376c41d5a44465605a361aa2_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:17,113 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/f514bf49ee8b4de885cd2e208e19696d, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:17,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/f514bf49ee8b4de885cd2e208e19696d is 175, key is test_row_0/A:col10/1733325736643/Put/seqid=0 2024-12-04T15:22:17,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742073_1249 (size=39949) 2024-12-04T15:22:17,229 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:17,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-04T15:22:17,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:17,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:17,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:17,230 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:17,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58498 deadline: 1733325797306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:17,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:17,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58496 deadline: 1733325797320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:17,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:17,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325797363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:17,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-04T15:22:17,382 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:17,382 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-04T15:22:17,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:17,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:17,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:17,383 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,519 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=377, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/f514bf49ee8b4de885cd2e208e19696d 2024-12-04T15:22:17,529 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/7531600ed8404864b35bcd265a2418c9 is 50, key is test_row_0/B:col10/1733325736643/Put/seqid=0 2024-12-04T15:22:17,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742074_1250 (size=12301) 2024-12-04T15:22:17,535 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:17,535 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-04T15:22:17,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:17,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:17,536 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/7531600ed8404864b35bcd265a2418c9 2024-12-04T15:22:17,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:17,536 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,544 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/91b1ea45fb6f45c8a434768f34a60884 is 50, key is test_row_0/C:col10/1733325736643/Put/seqid=0 2024-12-04T15:22:17,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742075_1251 (size=12301) 2024-12-04T15:22:17,688 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:17,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-04T15:22:17,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:17,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:17,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:17,689 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,841 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:17,842 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-04T15:22:17,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:17,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. as already flushing 2024-12-04T15:22:17,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:17,842 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:17,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:17,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58570 deadline: 1733325797866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:17,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-04T15:22:17,949 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/91b1ea45fb6f45c8a434768f34a60884 2024-12-04T15:22:17,955 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/f514bf49ee8b4de885cd2e208e19696d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f514bf49ee8b4de885cd2e208e19696d 2024-12-04T15:22:17,962 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f514bf49ee8b4de885cd2e208e19696d, entries=200, sequenceid=377, filesize=39.0 K 2024-12-04T15:22:17,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/7531600ed8404864b35bcd265a2418c9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/7531600ed8404864b35bcd265a2418c9 2024-12-04T15:22:17,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/7531600ed8404864b35bcd265a2418c9, entries=150, sequenceid=377, filesize=12.0 K 2024-12-04T15:22:17,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/91b1ea45fb6f45c8a434768f34a60884 as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/91b1ea45fb6f45c8a434768f34a60884 2024-12-04T15:22:17,974 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/91b1ea45fb6f45c8a434768f34a60884, entries=150, sequenceid=377, filesize=12.0 K 2024-12-04T15:22:17,975 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 1329ms, sequenceid=377, compaction requested=true 2024-12-04T15:22:17,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:17,976 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:17,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:17,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:17,976 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:17,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:17,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:17,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3bb1bcca60c6b0bcd1824e7ad9c8f501:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:17,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:17,977 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:17,977 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/A is initiating minor compaction (all files) 2024-12-04T15:22:17,977 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/A in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
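The HMobStore and DefaultMobStoreFlusher/DefaultMobStoreCompactor entries in this run indicate that family A is MOB-enabled: flushed values above the MOB threshold are written under mobdir and renamed into place before the regular store file is committed. A sketch of declaring such a family follows; the threshold value is hypothetical and only the MOB flags are the point.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ColumnFamilyDescriptor cfA = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)     // store large values as MOB files under mobdir
          .setMobThreshold(100L)   // hypothetical threshold in bytes
          .build();
      TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setColumnFamily(cfA)
          .build();
      admin.createTable(td);
    }
  }
}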
2024-12-04T15:22:17,977 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/b9fae9b1d888405baf908946535bfbb3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/2c2d43485cfc454a86eab908fb1b814a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f514bf49ee8b4de885cd2e208e19696d] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=100.8 K 2024-12-04T15:22:17,977 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:17,977 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/b9fae9b1d888405baf908946535bfbb3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/2c2d43485cfc454a86eab908fb1b814a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f514bf49ee8b4de885cd2e208e19696d] 2024-12-04T15:22:17,977 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:17,978 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/B is initiating minor compaction (all files) 2024-12-04T15:22:17,978 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/B in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
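The ExploringCompactionPolicy lines above report that all three store files per family satisfy the compaction ratio, so a minor compaction of the whole set is selected. A small standalone illustration of that "files in ratio" rule follows: no single file may be larger than the ratio times the combined size of the others. The sizes roughly match the A-family files (31.3 K, 30.5 K, 39.0 K, totalling 103243 bytes); the ratio of 1.2 is HBase's default, not a value read from this run.

public class FilesInRatioSketch {
  static boolean filesInRatio(long[] sizes, double ratio) {
    long total = 0;
    for (long s : sizes) total += s;
    for (long s : sizes) {
      // each file must be no larger than ratio * (sum of the other files)
      if (s > ratio * (total - s)) return false;
    }
    return true;
  }

  public static void main(String[] args) {
    long[] storeFiles = {32051, 31243, 39949}; // approximate A-family file sizes from the log
    System.out.println(filesInRatio(storeFiles, 1.2)); // true => eligible for minor compaction
  }
}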
2024-12-04T15:22:17,978 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/45e798bfd8954f8a994f8287c8dcae09, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/2cbf410d244149dc906e26c05935803d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/7531600ed8404864b35bcd265a2418c9] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=36.8 K 2024-12-04T15:22:17,978 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9fae9b1d888405baf908946535bfbb3, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733325734579 2024-12-04T15:22:17,978 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 45e798bfd8954f8a994f8287c8dcae09, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733325734579 2024-12-04T15:22:17,979 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c2d43485cfc454a86eab908fb1b814a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1733325734949 2024-12-04T15:22:17,981 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 2cbf410d244149dc906e26c05935803d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1733325734949 2024-12-04T15:22:17,981 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f514bf49ee8b4de885cd2e208e19696d, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733325736161 2024-12-04T15:22:17,981 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7531600ed8404864b35bcd265a2418c9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733325736161 2024-12-04T15:22:17,994 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#B#compaction#208 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:17,995 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:17,995 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/5f30b3922e184a139820921416520c15 is 50, key is test_row_0/B:col10/1733325736643/Put/seqid=0 2024-12-04T15:22:17,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-04T15:22:17,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:17,996 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-04T15:22:17,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:22:17,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:17,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:22:17,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:17,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:22:17,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:18,003 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:18,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742076_1252 (size=13187) 2024-12-04T15:22:18,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204d413976a76d84b118ea2976ac3929c58_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325736744/Put/seqid=0 2024-12-04T15:22:18,024 DEBUG 
[RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241204787d695886614be2847eb167d67a0845_3bb1bcca60c6b0bcd1824e7ad9c8f501 store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:18,026 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/5f30b3922e184a139820921416520c15 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/5f30b3922e184a139820921416520c15 2024-12-04T15:22:18,041 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/B of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into 5f30b3922e184a139820921416520c15(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:18,041 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:18,041 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/B, priority=13, startTime=1733325737976; duration=0sec 2024-12-04T15:22:18,041 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:18,041 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:B 2024-12-04T15:22:18,041 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:18,048 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:18,048 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 3bb1bcca60c6b0bcd1824e7ad9c8f501/C is initiating minor compaction (all files) 2024-12-04T15:22:18,048 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3bb1bcca60c6b0bcd1824e7ad9c8f501/C in TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:18,048 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/fd337d93f97a4e489da9c9885f1e071c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/7b6fb085ae0f4b11bc52714359f12ce3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/91b1ea45fb6f45c8a434768f34a60884] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp, totalSize=36.8 K 2024-12-04T15:22:18,049 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting fd337d93f97a4e489da9c9885f1e071c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733325734579 2024-12-04T15:22:18,049 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b6fb085ae0f4b11bc52714359f12ce3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1733325734949 2024-12-04T15:22:18,050 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 91b1ea45fb6f45c8a434768f34a60884, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733325736161 2024-12-04T15:22:18,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742077_1253 (size=12454) 2024-12-04T15:22:18,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:18,066 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204d413976a76d84b118ea2976ac3929c58_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204d413976a76d84b118ea2976ac3929c58_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:18,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/6a97d7a0b10b43ef86311d08085f95c4, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:18,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/6a97d7a0b10b43ef86311d08085f95c4 is 175, key is test_row_0/A:col10/1733325736744/Put/seqid=0 2024-12-04T15:22:18,085 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#C#compaction#211 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:18,086 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/08bfc14759d54c36b7c0246cc559d55c is 50, key is test_row_0/C:col10/1733325736643/Put/seqid=0 2024-12-04T15:22:18,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742078_1254 (size=31255) 2024-12-04T15:22:18,091 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=399, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/6a97d7a0b10b43ef86311d08085f95c4 2024-12-04T15:22:18,111 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241204787d695886614be2847eb167d67a0845_3bb1bcca60c6b0bcd1824e7ad9c8f501, store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:18,111 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204787d695886614be2847eb167d67a0845_3bb1bcca60c6b0bcd1824e7ad9c8f501 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:18,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/e67e615364044ed0b1c44ce51800c220 is 50, key is test_row_0/B:col10/1733325736744/Put/seqid=0 2024-12-04T15:22:18,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742079_1255 (size=13187) 2024-12-04T15:22:18,150 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/08bfc14759d54c36b7c0246cc559d55c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/08bfc14759d54c36b7c0246cc559d55c 2024-12-04T15:22:18,156 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): 
Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/C of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into 08bfc14759d54c36b7c0246cc559d55c(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:18,156 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:18,157 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/C, priority=13, startTime=1733325737976; duration=0sec 2024-12-04T15:22:18,157 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:18,157 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:C 2024-12-04T15:22:18,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742080_1256 (size=4469) 2024-12-04T15:22:18,173 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3bb1bcca60c6b0bcd1824e7ad9c8f501#A#compaction#209 average throughput is 0.14 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:18,174 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/a94edea8d93b48c084a93961d7995fa2 is 175, key is test_row_0/A:col10/1733325736643/Put/seqid=0 2024-12-04T15:22:18,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742081_1257 (size=12301) 2024-12-04T15:22:18,185 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/e67e615364044ed0b1c44ce51800c220 2024-12-04T15:22:18,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742082_1258 (size=32141) 2024-12-04T15:22:18,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/72beb9ae50464e3eb6a2a7a70325bb38 is 50, key is test_row_0/C:col10/1733325736744/Put/seqid=0 2024-12-04T15:22:18,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742083_1259 (size=12301) 2024-12-04T15:22:18,243 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/72beb9ae50464e3eb6a2a7a70325bb38 2024-12-04T15:22:18,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/6a97d7a0b10b43ef86311d08085f95c4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/6a97d7a0b10b43ef86311d08085f95c4 2024-12-04T15:22:18,254 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/6a97d7a0b10b43ef86311d08085f95c4, entries=150, sequenceid=399, filesize=30.5 K 2024-12-04T15:22:18,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/e67e615364044ed0b1c44ce51800c220 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/e67e615364044ed0b1c44ce51800c220 2024-12-04T15:22:18,260 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/e67e615364044ed0b1c44ce51800c220, entries=150, sequenceid=399, filesize=12.0 K 2024-12-04T15:22:18,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/72beb9ae50464e3eb6a2a7a70325bb38 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/72beb9ae50464e3eb6a2a7a70325bb38 2024-12-04T15:22:18,266 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/72beb9ae50464e3eb6a2a7a70325bb38, entries=150, sequenceid=399, filesize=12.0 K 2024-12-04T15:22:18,267 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 272ms, sequenceid=399, compaction requested=false 2024-12-04T15:22:18,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 
{event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:18,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:18,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-12-04T15:22:18,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-12-04T15:22:18,270 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-04T15:22:18,270 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4970 sec 2024-12-04T15:22:18,272 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.5050 sec 2024-12-04T15:22:18,316 DEBUG [Thread-715 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7461d456 to 127.0.0.1:55739 2024-12-04T15:22:18,316 DEBUG [Thread-715 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:18,327 DEBUG [Thread-717 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x63176ac8 to 127.0.0.1:55739 2024-12-04T15:22:18,327 DEBUG [Thread-717 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:18,610 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/a94edea8d93b48c084a93961d7995fa2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a94edea8d93b48c084a93961d7995fa2 2024-12-04T15:22:18,616 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb1bcca60c6b0bcd1824e7ad9c8f501/A of 3bb1bcca60c6b0bcd1824e7ad9c8f501 into a94edea8d93b48c084a93961d7995fa2(size=31.4 K), total size for store is 61.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:18,616 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:18,616 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501., storeName=3bb1bcca60c6b0bcd1824e7ad9c8f501/A, priority=13, startTime=1733325737976; duration=0sec 2024-12-04T15:22:18,616 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:18,616 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3bb1bcca60c6b0bcd1824e7ad9c8f501:A 2024-12-04T15:22:18,871 DEBUG [Thread-719 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x01639acb to 127.0.0.1:55739 2024-12-04T15:22:18,872 DEBUG [Thread-719 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:18,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-04T15:22:18,875 INFO [Thread-721 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-12-04T15:22:20,617 DEBUG [Thread-711 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5bb5c6ff to 127.0.0.1:55739 2024-12-04T15:22:20,617 DEBUG [Thread-711 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:20,637 DEBUG [Thread-713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a3677e6 to 127.0.0.1:55739 2024-12-04T15:22:20,637 DEBUG [Thread-713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:20,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-04T15:22:20,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 48 2024-12-04T15:22:20,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 45 2024-12-04T15:22:20,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 71 2024-12-04T15:22:20,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 89 2024-12-04T15:22:20,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 66 2024-12-04T15:22:20,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-04T15:22:20,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3010 2024-12-04T15:22:20,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2920 2024-12-04T15:22:20,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-04T15:22:20,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1246 2024-12-04T15:22:20,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3738 rows 2024-12-04T15:22:20,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1251 2024-12-04T15:22:20,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3752 rows 2024-12-04T15:22:20,637 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-04T15:22:20,637 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38284410 to 127.0.0.1:55739 2024-12-04T15:22:20,637 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:20,641 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-04T15:22:20,642 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-04T15:22:20,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-04T15:22:20,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-04T15:22:20,646 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325740646"}]},"ts":"1733325740646"} 2024-12-04T15:22:20,647 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-04T15:22:20,649 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-04T15:22:20,650 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-04T15:22:20,651 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3bb1bcca60c6b0bcd1824e7ad9c8f501, UNASSIGN}] 2024-12-04T15:22:20,652 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=3bb1bcca60c6b0bcd1824e7ad9c8f501, UNASSIGN 2024-12-04T15:22:20,652 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=3bb1bcca60c6b0bcd1824e7ad9c8f501, regionState=CLOSING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:20,653 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-04T15:22:20,653 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; CloseRegionProcedure 3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:22:20,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-04T15:22:20,804 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:20,805 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(124): Close 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:20,805 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-04T15:22:20,805 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1681): Closing 3bb1bcca60c6b0bcd1824e7ad9c8f501, disabling compactions & flushes 2024-12-04T15:22:20,805 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:20,805 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:20,805 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. after waiting 0 ms 2024-12-04T15:22:20,805 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 
2024-12-04T15:22:20,805 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(2837): Flushing 3bb1bcca60c6b0bcd1824e7ad9c8f501 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-04T15:22:20,806 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=A 2024-12-04T15:22:20,806 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:20,806 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=B 2024-12-04T15:22:20,806 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:20,806 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3bb1bcca60c6b0bcd1824e7ad9c8f501, store=C 2024-12-04T15:22:20,806 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:20,814 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204cc455cf7e0fe42339a03dc1520033ba5_3bb1bcca60c6b0bcd1824e7ad9c8f501 is 50, key is test_row_0/A:col10/1733325740636/Put/seqid=0 2024-12-04T15:22:20,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742084_1260 (size=12454) 2024-12-04T15:22:20,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-04T15:22:21,220 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:21,224 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204cc455cf7e0fe42339a03dc1520033ba5_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204cc455cf7e0fe42339a03dc1520033ba5_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:21,225 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/9b0abe35022e43a485deef9d2589dc32, store: [table=TestAcidGuarantees family=A region=3bb1bcca60c6b0bcd1824e7ad9c8f501] 2024-12-04T15:22:21,226 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/9b0abe35022e43a485deef9d2589dc32 is 175, key is test_row_0/A:col10/1733325740636/Put/seqid=0 2024-12-04T15:22:21,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742085_1261 (size=31255) 2024-12-04T15:22:21,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-04T15:22:21,630 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=410, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/9b0abe35022e43a485deef9d2589dc32 2024-12-04T15:22:21,638 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/33643eef587d4eebbd461b6da255040b is 50, key is test_row_0/B:col10/1733325740636/Put/seqid=0 2024-12-04T15:22:21,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742086_1262 (size=12301) 2024-12-04T15:22:21,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-04T15:22:22,054 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/33643eef587d4eebbd461b6da255040b 2024-12-04T15:22:22,061 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/03a3a380cc36442f9227b1db90bdbb54 is 50, key is test_row_0/C:col10/1733325740636/Put/seqid=0 2024-12-04T15:22:22,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742087_1263 (size=12301) 2024-12-04T15:22:22,213 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-04T15:22:22,465 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/03a3a380cc36442f9227b1db90bdbb54 2024-12-04T15:22:22,471 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/A/9b0abe35022e43a485deef9d2589dc32 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/9b0abe35022e43a485deef9d2589dc32 2024-12-04T15:22:22,475 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/9b0abe35022e43a485deef9d2589dc32, entries=150, sequenceid=410, filesize=30.5 K 2024-12-04T15:22:22,476 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/B/33643eef587d4eebbd461b6da255040b as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/33643eef587d4eebbd461b6da255040b 2024-12-04T15:22:22,480 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/33643eef587d4eebbd461b6da255040b, entries=150, sequenceid=410, filesize=12.0 K 2024-12-04T15:22:22,481 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/.tmp/C/03a3a380cc36442f9227b1db90bdbb54 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/03a3a380cc36442f9227b1db90bdbb54 2024-12-04T15:22:22,486 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/03a3a380cc36442f9227b1db90bdbb54, entries=150, sequenceid=410, filesize=12.0 K 2024-12-04T15:22:22,487 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 3bb1bcca60c6b0bcd1824e7ad9c8f501 in 1682ms, sequenceid=410, compaction requested=true 
2024-12-04T15:22:22,488 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/da2549b02e1b4290b49436b9ac5bfa9c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/8c8d3bc5f27a448a958483436033ad9f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/d71f99ca13374310a5f4f29598f6fa07, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/d6ec2b06a136467dbd353f5fafbd5d29, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a100cd51cc8845e08826bb0ddc244642, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a63b2954dcd347629b2853517bee957b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/45ed29495496495c83c6fd2c12a7314e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f79295b59a794efcba1362ba9a2c269e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/b8cc81042f8d49239915cc2ed1ab6e65, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a31ddccb1cd040abaf3061820278012f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/3047a5cafd3a467892a7d44f78c5dff2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/48cbcb656e39452faef886ea907b68b1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/03ac288541be4f01ad67b4c69deda3a4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/0a2e507357f14f9d81d8ac49719a626f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/ddac2f67860c4358a787fe0178533f1f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/562677faf213454c932e2d762be7393d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f1678f2252bc42c6bcd2eaf71ad14942, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/6a5a6ea61807411fbd545ba9b4313e07, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/846c22f4fdf44645833b39f54adf2dd4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/cc03b8830d5646ad95cd89873f3e8537, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/736d06bbe0b54171ac9f81922182f8f9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/59428fedb0f3480d91d3c179f013e6df, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/9de306d3e1744a8f91c710fe621ac064, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/b9fae9b1d888405baf908946535bfbb3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/2c2d43485cfc454a86eab908fb1b814a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f514bf49ee8b4de885cd2e208e19696d] to archive 2024-12-04T15:22:22,489 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-04T15:22:22,491 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/da2549b02e1b4290b49436b9ac5bfa9c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/da2549b02e1b4290b49436b9ac5bfa9c 2024-12-04T15:22:22,493 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/8c8d3bc5f27a448a958483436033ad9f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/8c8d3bc5f27a448a958483436033ad9f 2024-12-04T15:22:22,494 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/d71f99ca13374310a5f4f29598f6fa07 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/d71f99ca13374310a5f4f29598f6fa07 2024-12-04T15:22:22,496 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/d6ec2b06a136467dbd353f5fafbd5d29 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/d6ec2b06a136467dbd353f5fafbd5d29 2024-12-04T15:22:22,497 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a100cd51cc8845e08826bb0ddc244642 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a100cd51cc8845e08826bb0ddc244642 2024-12-04T15:22:22,499 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a63b2954dcd347629b2853517bee957b to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a63b2954dcd347629b2853517bee957b 2024-12-04T15:22:22,500 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/45ed29495496495c83c6fd2c12a7314e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/45ed29495496495c83c6fd2c12a7314e 2024-12-04T15:22:22,502 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f79295b59a794efcba1362ba9a2c269e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f79295b59a794efcba1362ba9a2c269e 2024-12-04T15:22:22,503 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/b8cc81042f8d49239915cc2ed1ab6e65 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/b8cc81042f8d49239915cc2ed1ab6e65 2024-12-04T15:22:22,505 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a31ddccb1cd040abaf3061820278012f to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a31ddccb1cd040abaf3061820278012f 2024-12-04T15:22:22,506 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/3047a5cafd3a467892a7d44f78c5dff2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/3047a5cafd3a467892a7d44f78c5dff2 2024-12-04T15:22:22,508 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/48cbcb656e39452faef886ea907b68b1 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/48cbcb656e39452faef886ea907b68b1 2024-12-04T15:22:22,513 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/03ac288541be4f01ad67b4c69deda3a4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/03ac288541be4f01ad67b4c69deda3a4 2024-12-04T15:22:22,521 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/0a2e507357f14f9d81d8ac49719a626f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/0a2e507357f14f9d81d8ac49719a626f 2024-12-04T15:22:22,523 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/ddac2f67860c4358a787fe0178533f1f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/ddac2f67860c4358a787fe0178533f1f 2024-12-04T15:22:22,527 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/562677faf213454c932e2d762be7393d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/562677faf213454c932e2d762be7393d 2024-12-04T15:22:22,528 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f1678f2252bc42c6bcd2eaf71ad14942 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f1678f2252bc42c6bcd2eaf71ad14942 2024-12-04T15:22:22,530 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/6a5a6ea61807411fbd545ba9b4313e07 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/6a5a6ea61807411fbd545ba9b4313e07 2024-12-04T15:22:22,531 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/846c22f4fdf44645833b39f54adf2dd4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/846c22f4fdf44645833b39f54adf2dd4 2024-12-04T15:22:22,532 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/cc03b8830d5646ad95cd89873f3e8537 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/cc03b8830d5646ad95cd89873f3e8537 2024-12-04T15:22:22,534 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/736d06bbe0b54171ac9f81922182f8f9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/736d06bbe0b54171ac9f81922182f8f9 2024-12-04T15:22:22,540 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/59428fedb0f3480d91d3c179f013e6df to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/59428fedb0f3480d91d3c179f013e6df 2024-12-04T15:22:22,542 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/9de306d3e1744a8f91c710fe621ac064 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/9de306d3e1744a8f91c710fe621ac064 2024-12-04T15:22:22,544 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/b9fae9b1d888405baf908946535bfbb3 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/b9fae9b1d888405baf908946535bfbb3 2024-12-04T15:22:22,545 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/2c2d43485cfc454a86eab908fb1b814a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/2c2d43485cfc454a86eab908fb1b814a 2024-12-04T15:22:22,546 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f514bf49ee8b4de885cd2e208e19696d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/f514bf49ee8b4de885cd2e208e19696d 2024-12-04T15:22:22,548 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/5b7ee73d21ce40dcb29165c97f4aed1f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/b01575345e514d729a3b7f13cbad28c4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/f7096d0e25824b3b9e4a79ff653f61eb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/49a8e2bfca07463bbc4d6014f98869ea, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/9eda243979c342678b114f9ffebfb392, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/09d3adada5204d5291610a193c44ddf4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/0318a86d46c0457d986d9228dcab7a00, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1f33e6b6517248d98b98e5b50a79c055, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/4136d4e1100647b988ac6a15815c8aca, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/a8580831c3a94dc9a641b8e9b4d46ddb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1d253cfd407342b8a289dda98a58141f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1a3a576c4cdd48d780b279d34a8cd222, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/046da8b285294b81acd748c92801df40, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/8dfac8a07b574261ac5f8947f16b976d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/ebf2b71d6fb84a11968676b56b38cae2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/be4f552434bb47ff9b3aca06e8264e7e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/753fe9f1f3664c95bffee6a1e4fca4b2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/22402b9300084cb3bdedb4fcb8c24171, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/48b82d7248f14b838d3887c96a3f39f9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/beb9e8241d7f4142af8a2a0dd5167ef2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/e737c0de24ec454d95409e209de894e5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/4b9c506ba33e40789932ae1fcde1c5e8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/45e798bfd8954f8a994f8287c8dcae09, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/c8642302df9e4388bf289a046b7c8462, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/2cbf410d244149dc906e26c05935803d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/7531600ed8404864b35bcd265a2418c9] to archive 2024-12-04T15:22:22,551 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-04T15:22:22,553 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/5b7ee73d21ce40dcb29165c97f4aed1f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/5b7ee73d21ce40dcb29165c97f4aed1f 2024-12-04T15:22:22,554 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/b01575345e514d729a3b7f13cbad28c4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/b01575345e514d729a3b7f13cbad28c4 2024-12-04T15:22:22,556 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/f7096d0e25824b3b9e4a79ff653f61eb to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/f7096d0e25824b3b9e4a79ff653f61eb 2024-12-04T15:22:22,557 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/49a8e2bfca07463bbc4d6014f98869ea to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/49a8e2bfca07463bbc4d6014f98869ea 2024-12-04T15:22:22,558 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/9eda243979c342678b114f9ffebfb392 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/9eda243979c342678b114f9ffebfb392 2024-12-04T15:22:22,560 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/09d3adada5204d5291610a193c44ddf4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/09d3adada5204d5291610a193c44ddf4 2024-12-04T15:22:22,561 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/0318a86d46c0457d986d9228dcab7a00 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/0318a86d46c0457d986d9228dcab7a00 2024-12-04T15:22:22,562 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1f33e6b6517248d98b98e5b50a79c055 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1f33e6b6517248d98b98e5b50a79c055 2024-12-04T15:22:22,564 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/4136d4e1100647b988ac6a15815c8aca to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/4136d4e1100647b988ac6a15815c8aca 2024-12-04T15:22:22,565 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/a8580831c3a94dc9a641b8e9b4d46ddb to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/a8580831c3a94dc9a641b8e9b4d46ddb 2024-12-04T15:22:22,566 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1d253cfd407342b8a289dda98a58141f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1d253cfd407342b8a289dda98a58141f 2024-12-04T15:22:22,568 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1a3a576c4cdd48d780b279d34a8cd222 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/1a3a576c4cdd48d780b279d34a8cd222 2024-12-04T15:22:22,569 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/046da8b285294b81acd748c92801df40 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/046da8b285294b81acd748c92801df40 2024-12-04T15:22:22,571 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/8dfac8a07b574261ac5f8947f16b976d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/8dfac8a07b574261ac5f8947f16b976d 2024-12-04T15:22:22,572 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/ebf2b71d6fb84a11968676b56b38cae2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/ebf2b71d6fb84a11968676b56b38cae2 2024-12-04T15:22:22,575 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/be4f552434bb47ff9b3aca06e8264e7e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/be4f552434bb47ff9b3aca06e8264e7e 2024-12-04T15:22:22,577 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/753fe9f1f3664c95bffee6a1e4fca4b2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/753fe9f1f3664c95bffee6a1e4fca4b2 2024-12-04T15:22:22,578 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/22402b9300084cb3bdedb4fcb8c24171 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/22402b9300084cb3bdedb4fcb8c24171 2024-12-04T15:22:22,579 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/48b82d7248f14b838d3887c96a3f39f9 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/48b82d7248f14b838d3887c96a3f39f9 2024-12-04T15:22:22,580 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/beb9e8241d7f4142af8a2a0dd5167ef2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/beb9e8241d7f4142af8a2a0dd5167ef2 2024-12-04T15:22:22,582 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/e737c0de24ec454d95409e209de894e5 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/e737c0de24ec454d95409e209de894e5 2024-12-04T15:22:22,583 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/4b9c506ba33e40789932ae1fcde1c5e8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/4b9c506ba33e40789932ae1fcde1c5e8 2024-12-04T15:22:22,585 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/45e798bfd8954f8a994f8287c8dcae09 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/45e798bfd8954f8a994f8287c8dcae09 2024-12-04T15:22:22,586 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/c8642302df9e4388bf289a046b7c8462 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/c8642302df9e4388bf289a046b7c8462 2024-12-04T15:22:22,588 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/2cbf410d244149dc906e26c05935803d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/2cbf410d244149dc906e26c05935803d 2024-12-04T15:22:22,589 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/7531600ed8404864b35bcd265a2418c9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/7531600ed8404864b35bcd265a2418c9 2024-12-04T15:22:22,591 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/2363d74b04054f1b8f8f4a43eb3b61fb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/537f32f5951a422fa47aa5fa2687a813, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/791a466361c741179098ad87e170ca89, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/0fe3fe6d404044a5ab4b9c63c1966ab2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/fb79789f8ae84b62b17ffca061d8d6c7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/5b793bac534547deb9f7b4b6400223f1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/552d56bb8d3f41c0b143cbeb98a8e8e3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/943890f65d574cbb871e387cc81bedb6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/37533b799c4e43de83853bf3ea108dc5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/77088d9e748843c18859bed8c017819a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/263c59f70cbc473cb811aff78c06ca47, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/9cdba9f2258a47e78524694ddb125b7f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/1a47af261c7b4657be557066d6800300, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/11365962712042138458fe09e1cf12ee, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/3e97a03a3ed346b59783f0f07adbbab3, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/275fb76d68ec4ebdb2656a854ce45926, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/7e072375a6904f03b90397aa0f7a1b42, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/bf7224207ea542cb800f7e41da5e2ceb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/142b47cab3ad4d64846dba6c5b45a611, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/953248d8a7ce4bf4a38a9deac2c881f3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/af8e9913161248b3a81ae17bd11e9c10, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/8388f73245a149598fbdd83e49287066, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/fd337d93f97a4e489da9c9885f1e071c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/01fb0578a2964ddda053d3803ffa88c1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/7b6fb085ae0f4b11bc52714359f12ce3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/91b1ea45fb6f45c8a434768f34a60884] to archive 2024-12-04T15:22:22,592 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-04T15:22:22,594 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/2363d74b04054f1b8f8f4a43eb3b61fb to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/2363d74b04054f1b8f8f4a43eb3b61fb 2024-12-04T15:22:22,595 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/537f32f5951a422fa47aa5fa2687a813 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/537f32f5951a422fa47aa5fa2687a813 2024-12-04T15:22:22,597 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/791a466361c741179098ad87e170ca89 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/791a466361c741179098ad87e170ca89 2024-12-04T15:22:22,598 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/0fe3fe6d404044a5ab4b9c63c1966ab2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/0fe3fe6d404044a5ab4b9c63c1966ab2 2024-12-04T15:22:22,600 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/fb79789f8ae84b62b17ffca061d8d6c7 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/fb79789f8ae84b62b17ffca061d8d6c7 2024-12-04T15:22:22,601 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/5b793bac534547deb9f7b4b6400223f1 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/5b793bac534547deb9f7b4b6400223f1 2024-12-04T15:22:22,602 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/552d56bb8d3f41c0b143cbeb98a8e8e3 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/552d56bb8d3f41c0b143cbeb98a8e8e3 2024-12-04T15:22:22,604 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/943890f65d574cbb871e387cc81bedb6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/943890f65d574cbb871e387cc81bedb6 2024-12-04T15:22:22,607 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/37533b799c4e43de83853bf3ea108dc5 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/37533b799c4e43de83853bf3ea108dc5 2024-12-04T15:22:22,608 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/77088d9e748843c18859bed8c017819a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/77088d9e748843c18859bed8c017819a 2024-12-04T15:22:22,610 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/263c59f70cbc473cb811aff78c06ca47 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/263c59f70cbc473cb811aff78c06ca47 2024-12-04T15:22:22,611 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/9cdba9f2258a47e78524694ddb125b7f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/9cdba9f2258a47e78524694ddb125b7f 2024-12-04T15:22:22,613 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/1a47af261c7b4657be557066d6800300 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/1a47af261c7b4657be557066d6800300 2024-12-04T15:22:22,614 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/11365962712042138458fe09e1cf12ee to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/11365962712042138458fe09e1cf12ee 2024-12-04T15:22:22,615 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/3e97a03a3ed346b59783f0f07adbbab3 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/3e97a03a3ed346b59783f0f07adbbab3 2024-12-04T15:22:22,617 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/275fb76d68ec4ebdb2656a854ce45926 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/275fb76d68ec4ebdb2656a854ce45926 2024-12-04T15:22:22,618 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/7e072375a6904f03b90397aa0f7a1b42 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/7e072375a6904f03b90397aa0f7a1b42 2024-12-04T15:22:22,620 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/bf7224207ea542cb800f7e41da5e2ceb to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/bf7224207ea542cb800f7e41da5e2ceb 2024-12-04T15:22:22,621 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/142b47cab3ad4d64846dba6c5b45a611 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/142b47cab3ad4d64846dba6c5b45a611 2024-12-04T15:22:22,623 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/953248d8a7ce4bf4a38a9deac2c881f3 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/953248d8a7ce4bf4a38a9deac2c881f3 2024-12-04T15:22:22,625 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/af8e9913161248b3a81ae17bd11e9c10 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/af8e9913161248b3a81ae17bd11e9c10 2024-12-04T15:22:22,627 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/8388f73245a149598fbdd83e49287066 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/8388f73245a149598fbdd83e49287066 2024-12-04T15:22:22,629 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/fd337d93f97a4e489da9c9885f1e071c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/fd337d93f97a4e489da9c9885f1e071c 2024-12-04T15:22:22,630 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/01fb0578a2964ddda053d3803ffa88c1 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/01fb0578a2964ddda053d3803ffa88c1 2024-12-04T15:22:22,632 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/7b6fb085ae0f4b11bc52714359f12ce3 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/7b6fb085ae0f4b11bc52714359f12ce3 2024-12-04T15:22:22,633 DEBUG [StoreCloser-TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/91b1ea45fb6f45c8a434768f34a60884 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/91b1ea45fb6f45c8a434768f34a60884 2024-12-04T15:22:22,640 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/recovered.edits/413.seqid, newMaxSeqId=413, maxSeqId=4 2024-12-04T15:22:22,641 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501. 2024-12-04T15:22:22,641 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1635): Region close journal for 3bb1bcca60c6b0bcd1824e7ad9c8f501: 2024-12-04T15:22:22,643 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(170): Closed 3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,643 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=3bb1bcca60c6b0bcd1824e7ad9c8f501, regionState=CLOSED 2024-12-04T15:22:22,646 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-04T15:22:22,646 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; CloseRegionProcedure 3bb1bcca60c6b0bcd1824e7ad9c8f501, server=645c2dbfef2e,42169,1733325683856 in 1.9910 sec 2024-12-04T15:22:22,647 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=60 2024-12-04T15:22:22,647 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=60, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3bb1bcca60c6b0bcd1824e7ad9c8f501, UNASSIGN in 1.9950 sec 2024-12-04T15:22:22,649 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-04T15:22:22,649 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9980 sec 2024-12-04T15:22:22,650 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325742650"}]},"ts":"1733325742650"} 2024-12-04T15:22:22,651 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-04T15:22:22,653 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-04T15:22:22,654 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.0120 sec 2024-12-04T15:22:22,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-04T15:22:22,750 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: 
default:TestAcidGuarantees, procId: 59 completed 2024-12-04T15:22:22,751 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-04T15:22:22,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:22:22,752 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:22:22,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-04T15:22:22,754 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=63, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:22:22,756 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,759 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/recovered.edits] 2024-12-04T15:22:22,763 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/6a97d7a0b10b43ef86311d08085f95c4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/6a97d7a0b10b43ef86311d08085f95c4 2024-12-04T15:22:22,764 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/9b0abe35022e43a485deef9d2589dc32 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/9b0abe35022e43a485deef9d2589dc32 2024-12-04T15:22:22,766 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a94edea8d93b48c084a93961d7995fa2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/A/a94edea8d93b48c084a93961d7995fa2 2024-12-04T15:22:22,769 DEBUG 
[HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/33643eef587d4eebbd461b6da255040b to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/33643eef587d4eebbd461b6da255040b 2024-12-04T15:22:22,771 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/5f30b3922e184a139820921416520c15 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/5f30b3922e184a139820921416520c15 2024-12-04T15:22:22,772 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/e67e615364044ed0b1c44ce51800c220 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/B/e67e615364044ed0b1c44ce51800c220 2024-12-04T15:22:22,774 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/03a3a380cc36442f9227b1db90bdbb54 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/03a3a380cc36442f9227b1db90bdbb54 2024-12-04T15:22:22,776 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/08bfc14759d54c36b7c0246cc559d55c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/08bfc14759d54c36b7c0246cc559d55c 2024-12-04T15:22:22,777 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/72beb9ae50464e3eb6a2a7a70325bb38 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/C/72beb9ae50464e3eb6a2a7a70325bb38 2024-12-04T15:22:22,780 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/recovered.edits/413.seqid to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501/recovered.edits/413.seqid 2024-12-04T15:22:22,781 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,781 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-04T15:22:22,782 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-04T15:22:22,783 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-04T15:22:22,787 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120405480687f22541c68e205c5c210b2eb9_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120405480687f22541c68e205c5c210b2eb9_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,788 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120415915519e94746e3ae255350cfe75f1c_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120415915519e94746e3ae255350cfe75f1c_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,789 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412042cc7103952be4a0d9f0a21876dd61a0f_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412042cc7103952be4a0d9f0a21876dd61a0f_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,791 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204302f5a7d2d764c0ab316fb9459aa45ac_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204302f5a7d2d764c0ab316fb9459aa45ac_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,792 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412044a5f54058e8e420187bc07b47021107c_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412044a5f54058e8e420187bc07b47021107c_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,793 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412044e0ca7f4666d41fba9b549a526053d6d_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412044e0ca7f4666d41fba9b549a526053d6d_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,794 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120451a955ad2fc3406eaaad9d20e257bc6d_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120451a955ad2fc3406eaaad9d20e257bc6d_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,795 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204698af062376c41d5a44465605a361aa2_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204698af062376c41d5a44465605a361aa2_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,797 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412046a6ab3070d8346a198b7e775cedbb98c_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412046a6ab3070d8346a198b7e775cedbb98c_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,798 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412046d18090e3ed94c26b22038a46b690526_3bb1bcca60c6b0bcd1824e7ad9c8f501 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412046d18090e3ed94c26b22038a46b690526_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,799 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204785c364835ac4a7492f5a64cec3c836a_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204785c364835ac4a7492f5a64cec3c836a_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,800 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412049dd8550f70604f638a688445a431b544_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412049dd8550f70604f638a688445a431b544_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,801 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204c25c7efee61f43d4a550019484dd5e68_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204c25c7efee61f43d4a550019484dd5e68_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,802 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204cc455cf7e0fe42339a03dc1520033ba5_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204cc455cf7e0fe42339a03dc1520033ba5_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,804 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204d3fa2a9b83164f79bdaa7ca93cbba6a6_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204d3fa2a9b83164f79bdaa7ca93cbba6a6_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,805 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204d413976a76d84b118ea2976ac3929c58_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204d413976a76d84b118ea2976ac3929c58_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,806 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204e4d492a9fad642629b7c9a6c0febedd7_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204e4d492a9fad642629b7c9a6c0febedd7_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,807 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204e7f03d373a524bd0acce528c5b917908_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204e7f03d373a524bd0acce528c5b917908_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,808 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204f9a8d40899f44f83bb7ba18c423053c2_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204f9a8d40899f44f83bb7ba18c423053c2_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,809 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204fbcb6d9ca58e479a857d2479d9fbc1b4_3bb1bcca60c6b0bcd1824e7ad9c8f501 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204fbcb6d9ca58e479a857d2479d9fbc1b4_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,811 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204fed18bfe7f36403ba413ba7a2f0a52c8_3bb1bcca60c6b0bcd1824e7ad9c8f501 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204fed18bfe7f36403ba413ba7a2f0a52c8_3bb1bcca60c6b0bcd1824e7ad9c8f501 2024-12-04T15:22:22,811 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-04T15:22:22,813 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=63, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:22:22,816 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-04T15:22:22,818 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-04T15:22:22,819 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=63, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:22:22,819 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-04T15:22:22,819 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733325742819"}]},"ts":"9223372036854775807"} 2024-12-04T15:22:22,821 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-04T15:22:22,821 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 3bb1bcca60c6b0bcd1824e7ad9c8f501, NAME => 'TestAcidGuarantees,,1733325714019.3bb1bcca60c6b0bcd1824e7ad9c8f501.', STARTKEY => '', ENDKEY => ''}] 2024-12-04T15:22:22,821 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-12-04T15:22:22,821 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733325742821"}]},"ts":"9223372036854775807"} 2024-12-04T15:22:22,823 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-04T15:22:22,825 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=63, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:22:22,826 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 74 msec 2024-12-04T15:22:22,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-04T15:22:22,854 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-04T15:22:22,866 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=240 (was 240), OpenFileDescriptor=458 (was 458), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=915 (was 920), ProcessCount=11 (was 11), AvailableMemoryMB=3279 (was 3764) 2024-12-04T15:22:22,877 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=240, OpenFileDescriptor=458, MaxFileDescriptor=1048576, SystemLoadAverage=915, ProcessCount=11, AvailableMemoryMB=3279 2024-12-04T15:22:22,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-04T15:22:22,879 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T15:22:22,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-04T15:22:22,881 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T15:22:22,881 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:22,881 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 64 2024-12-04T15:22:22,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-12-04T15:22:22,882 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T15:22:22,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742088_1264 (size=963) 2024-12-04T15:22:22,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-12-04T15:22:23,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-12-04T15:22:23,291 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c 2024-12-04T15:22:23,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742089_1265 (size=53) 2024-12-04T15:22:23,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-12-04T15:22:23,627 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-04T15:22:23,697 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:22:23,697 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing e068873424c7ff89600b835c0496bec4, disabling compactions & flushes 2024-12-04T15:22:23,697 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:23,697 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:23,697 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. after waiting 0 ms 2024-12-04T15:22:23,697 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:23,697 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:23,697 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:23,698 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T15:22:23,699 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733325743699"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733325743699"}]},"ts":"1733325743699"} 2024-12-04T15:22:23,700 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-04T15:22:23,700 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T15:22:23,700 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325743700"}]},"ts":"1733325743700"} 2024-12-04T15:22:23,701 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-04T15:22:23,705 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e068873424c7ff89600b835c0496bec4, ASSIGN}] 2024-12-04T15:22:23,705 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e068873424c7ff89600b835c0496bec4, ASSIGN 2024-12-04T15:22:23,706 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=e068873424c7ff89600b835c0496bec4, ASSIGN; state=OFFLINE, location=645c2dbfef2e,42169,1733325683856; forceNewPlan=false, retain=false 2024-12-04T15:22:23,856 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=e068873424c7ff89600b835c0496bec4, regionState=OPENING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:23,858 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; OpenRegionProcedure e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:22:23,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-12-04T15:22:24,009 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:24,013 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:24,013 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:22:24,013 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:24,013 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:22:24,014 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:24,014 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7330): checking classloading for e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:24,015 INFO [StoreOpener-e068873424c7ff89600b835c0496bec4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:24,016 INFO [StoreOpener-e068873424c7ff89600b835c0496bec4-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:22:24,016 INFO [StoreOpener-e068873424c7ff89600b835c0496bec4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e068873424c7ff89600b835c0496bec4 columnFamilyName A 2024-12-04T15:22:24,017 DEBUG [StoreOpener-e068873424c7ff89600b835c0496bec4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:24,017 INFO [StoreOpener-e068873424c7ff89600b835c0496bec4-1 {}] regionserver.HStore(327): Store=e068873424c7ff89600b835c0496bec4/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:22:24,017 INFO [StoreOpener-e068873424c7ff89600b835c0496bec4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:24,018 INFO [StoreOpener-e068873424c7ff89600b835c0496bec4-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:22:24,018 INFO [StoreOpener-e068873424c7ff89600b835c0496bec4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e068873424c7ff89600b835c0496bec4 columnFamilyName B 2024-12-04T15:22:24,018 DEBUG [StoreOpener-e068873424c7ff89600b835c0496bec4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:24,019 INFO [StoreOpener-e068873424c7ff89600b835c0496bec4-1 {}] regionserver.HStore(327): Store=e068873424c7ff89600b835c0496bec4/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:22:24,019 INFO [StoreOpener-e068873424c7ff89600b835c0496bec4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:24,020 INFO [StoreOpener-e068873424c7ff89600b835c0496bec4-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:22:24,020 INFO [StoreOpener-e068873424c7ff89600b835c0496bec4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e068873424c7ff89600b835c0496bec4 columnFamilyName C 2024-12-04T15:22:24,020 DEBUG [StoreOpener-e068873424c7ff89600b835c0496bec4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:24,020 INFO [StoreOpener-e068873424c7ff89600b835c0496bec4-1 {}] regionserver.HStore(327): Store=e068873424c7ff89600b835c0496bec4/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:22:24,021 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:24,021 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:24,021 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:24,023 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T15:22:24,024 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:24,026 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T15:22:24,027 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened e068873424c7ff89600b835c0496bec4; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67082477, jitterRate=-3.931969404220581E-4}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T15:22:24,028 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:24,029 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., pid=66, masterSystemTime=1733325744009 2024-12-04T15:22:24,030 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:24,030 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:24,030 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=e068873424c7ff89600b835c0496bec4, regionState=OPEN, openSeqNum=2, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:24,033 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-04T15:22:24,033 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; OpenRegionProcedure e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 in 173 msec 2024-12-04T15:22:24,034 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-12-04T15:22:24,034 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e068873424c7ff89600b835c0496bec4, ASSIGN in 328 msec 2024-12-04T15:22:24,035 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T15:22:24,035 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325744035"}]},"ts":"1733325744035"} 2024-12-04T15:22:24,036 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-04T15:22:24,038 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T15:22:24,040 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1590 sec 2024-12-04T15:22:24,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-12-04T15:22:24,987 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 64 completed 2024-12-04T15:22:24,988 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x474663e0 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e684827 2024-12-04T15:22:24,992 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44366854, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:24,993 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:24,994 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41068, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:24,995 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T15:22:24,996 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48164, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T15:22:24,999 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3052ca92 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1cd795cf 2024-12-04T15:22:25,002 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b543ac9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:25,003 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x76744267 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@562dc88b 2024-12-04T15:22:25,006 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c6522f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:25,007 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38d7ecf1 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@602f0 2024-12-04T15:22:25,010 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3541fa8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:25,011 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79badc6f to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@499b502a 2024-12-04T15:22:25,013 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73345e14, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:25,014 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4a60caef to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5969d989 2024-12-04T15:22:25,017 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8fce84e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:25,018 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x072b9016 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@687aa625 2024-12-04T15:22:25,021 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2513c73e, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:25,022 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x231730e5 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1ee1f12c 2024-12-04T15:22:25,025 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25f0fff9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:25,025 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a7e7237 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6b8794c5 2024-12-04T15:22:25,028 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49aaa17b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:25,029 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x07498874 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@503f05e4 2024-12-04T15:22:25,040 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c82c7e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:25,041 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x74fdeaca to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5cc5c194 2024-12-04T15:22:25,044 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a2a822, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:25,048 DEBUG [hconnection-0x310497b1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:25,048 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:22:25,048 DEBUG [hconnection-0x3875d84e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:25,049 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41080, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:25,050 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41086, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:25,050 DEBUG 
[hconnection-0x77e8ff4d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:25,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-12-04T15:22:25,051 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41094, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:25,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-04T15:22:25,051 DEBUG [hconnection-0x67cb09ae-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:25,052 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:22:25,052 DEBUG [hconnection-0xc377428-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:25,052 DEBUG [hconnection-0x3e34ea1c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:25,052 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41098, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:25,052 DEBUG [hconnection-0x18432d96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:25,053 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41110, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:25,053 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:22:25,053 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:22:25,054 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41116, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:25,055 DEBUG [hconnection-0x6b15d3f7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:25,057 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41120, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:25,058 DEBUG [hconnection-0x4fade4a3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:25,058 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41130, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:25,060 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41146, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:25,061 DEBUG [hconnection-0x4e239f4c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:25,063 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41150, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:25,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:25,064 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-04T15:22:25,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:25,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:25,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:25,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:25,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:25,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:25,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325805091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325805093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,098 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/a1970203652c47279396439de167e009 is 50, key is test_row_0/A:col10/1733325745060/Put/seqid=0 2024-12-04T15:22:25,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325805093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325805095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325805097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742090_1266 (size=12001) 2024-12-04T15:22:25,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-04T15:22:25,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325805199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325805201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325805201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,206 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325805201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325805202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,206 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-04T15:22:25,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:25,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:25,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:25,207 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
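The repeated WARN/DEBUG pairs above all describe the same condition: HRegion.checkResources rejects each incoming Mutate because the region's memstore has grown past its blocking limit (512.0 K in this test configuration), so the caller receives org.apache.hadoop.hbase.RegionTooBusyException and is expected to back off until the in-flight flush drains the memstore. The following sketch is illustrative only and is not part of the test or of HBase itself; it shows how a caller could retry such a rejected put. The table, row, family, and qualifier names are taken from this log, and in practice the standard HBase client applies this kind of retry and backoff internally.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);            // rejected while the memstore is over its blocking limit
          break;                     // write accepted once the flush has caught up
        } catch (RegionTooBusyException busy) {
          Thread.sleep(backoffMs);   // give the MemStoreFlusher time to drain the memstore
          backoffMs *= 2;            // simple exponential backoff between attempts
        }
      }
    }
  }
}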
2024-12-04T15:22:25,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:25,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:25,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-04T15:22:25,372 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,372 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-04T15:22:25,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:25,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:25,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:25,374 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:25,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:25,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:25,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325805405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,408 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325805407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,408 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325805407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325805416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325805418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,526 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,526 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-04T15:22:25,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:25,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:25,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:25,528 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
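The pid=68 entries follow one pattern: the master dispatches FlushRegionCallable to the region server, the region answers "NOT flushing ... as already flushing" because a flush is still running, the callable fails with IOException "Unable to complete flush ...", and the master logs "Remote procedure failed, pid=68" and re-dispatches it until the ongoing flush finishes. A minimal, purely illustrative guard with that shape (not the HBase implementation) is sketched below.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

class FlushGuard {
  private final AtomicBoolean flushing = new AtomicBoolean(false);

  /** Runs a flush unless one is already in progress; otherwise signals the caller to retry later. */
  void flushOrFail(Runnable flushWork) throws IOException {
    if (!flushing.compareAndSet(false, true)) {
      throw new IOException("Unable to complete flush: already flushing");
    }
    try {
      flushWork.run();          // write the memstore snapshot out as HFiles
    } finally {
      flushing.set(false);      // allow the next flush request through
    }
  }
}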
2024-12-04T15:22:25,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:25,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:25,545 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/a1970203652c47279396439de167e009 2024-12-04T15:22:25,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/083f9630898b4a9197bd572e7c6b7b18 is 50, key is test_row_0/B:col10/1733325745060/Put/seqid=0 2024-12-04T15:22:25,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-04T15:22:25,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742091_1267 (size=12001) 2024-12-04T15:22:25,661 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/083f9630898b4a9197bd572e7c6b7b18 2024-12-04T15:22:25,680 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,684 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-04T15:22:25,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:25,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:25,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:25,685 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:25,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:25,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:25,705 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/1d5ea073886547c9b56ec18696a379b6 is 50, key is test_row_0/C:col10/1733325745060/Put/seqid=0 2024-12-04T15:22:25,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325805724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325805725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325805726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325805726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:25,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325805732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742092_1268 (size=12001) 2024-12-04T15:22:25,748 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/1d5ea073886547c9b56ec18696a379b6 2024-12-04T15:22:25,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/a1970203652c47279396439de167e009 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/a1970203652c47279396439de167e009 2024-12-04T15:22:25,764 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/a1970203652c47279396439de167e009, entries=150, sequenceid=15, filesize=11.7 K 2024-12-04T15:22:25,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/083f9630898b4a9197bd572e7c6b7b18 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/083f9630898b4a9197bd572e7c6b7b18 2024-12-04T15:22:25,773 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/083f9630898b4a9197bd572e7c6b7b18, entries=150, sequenceid=15, filesize=11.7 K 2024-12-04T15:22:25,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/1d5ea073886547c9b56ec18696a379b6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/1d5ea073886547c9b56ec18696a379b6 2024-12-04T15:22:25,792 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/1d5ea073886547c9b56ec18696a379b6, entries=150, sequenceid=15, filesize=11.7 K 2024-12-04T15:22:25,793 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for e068873424c7ff89600b835c0496bec4 in 730ms, sequenceid=15, compaction requested=false 2024-12-04T15:22:25,793 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:25,863 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:25,864 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-04T15:22:25,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:25,864 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-04T15:22:25,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:25,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:25,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:25,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:25,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:25,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:25,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/e23f20c1a6484c7b85f58d913c3d088a is 50, key is test_row_0/A:col10/1733325745095/Put/seqid=0 2024-12-04T15:22:25,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742093_1269 (size=12001) 2024-12-04T15:22:25,897 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/e23f20c1a6484c7b85f58d913c3d088a 2024-12-04T15:22:25,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/f73480c197344a48a2770fd071c1c46b is 50, key is test_row_0/B:col10/1733325745095/Put/seqid=0 2024-12-04T15:22:25,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742094_1270 (size=12001) 2024-12-04T15:22:25,951 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 (bloomFilter=true), 
to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/f73480c197344a48a2770fd071c1c46b 2024-12-04T15:22:25,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/b7094e4c1f104b4db540dcb7cc8d344c is 50, key is test_row_0/C:col10/1733325745095/Put/seqid=0 2024-12-04T15:22:25,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742095_1271 (size=12001) 2024-12-04T15:22:26,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-04T15:22:26,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:26,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:26,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325806245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325806245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325806246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325806245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325806246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325806351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325806351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325806351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325806351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325806352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,397 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/b7094e4c1f104b4db540dcb7cc8d344c 2024-12-04T15:22:26,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/e23f20c1a6484c7b85f58d913c3d088a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e23f20c1a6484c7b85f58d913c3d088a 2024-12-04T15:22:26,408 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e23f20c1a6484c7b85f58d913c3d088a, entries=150, sequenceid=37, filesize=11.7 K 2024-12-04T15:22:26,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/f73480c197344a48a2770fd071c1c46b as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/f73480c197344a48a2770fd071c1c46b 2024-12-04T15:22:26,413 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/f73480c197344a48a2770fd071c1c46b, entries=150, sequenceid=37, filesize=11.7 K 2024-12-04T15:22:26,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/b7094e4c1f104b4db540dcb7cc8d344c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b7094e4c1f104b4db540dcb7cc8d344c 2024-12-04T15:22:26,435 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b7094e4c1f104b4db540dcb7cc8d344c, entries=150, sequenceid=37, filesize=11.7 K 2024-12-04T15:22:26,436 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for e068873424c7ff89600b835c0496bec4 in 572ms, sequenceid=37, compaction requested=false 2024-12-04T15:22:26,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:26,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:26,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-12-04T15:22:26,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-12-04T15:22:26,440 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-12-04T15:22:26,440 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3850 sec 2024-12-04T15:22:26,442 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 1.3920 sec 2024-12-04T15:22:26,559 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-04T15:22:26,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:26,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:26,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:26,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:26,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:26,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:26,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:26,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/b8cd24914a1b4ba2a6aa502baea7e858 is 50, key is test_row_0/A:col10/1733325746245/Put/seqid=0 2024-12-04T15:22:26,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325806577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325806586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325806586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325806588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325806588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742096_1272 (size=12001) 2024-12-04T15:22:26,607 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/b8cd24914a1b4ba2a6aa502baea7e858 2024-12-04T15:22:26,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/af1fa52b46ff4c1895a9096360020367 is 50, key is test_row_0/B:col10/1733325746245/Put/seqid=0 2024-12-04T15:22:26,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742097_1273 (size=12001) 2024-12-04T15:22:26,636 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/af1fa52b46ff4c1895a9096360020367 2024-12-04T15:22:26,661 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/d626e4e437ce4c65aefbcc92c063f3b8 is 50, key is test_row_0/C:col10/1733325746245/Put/seqid=0 2024-12-04T15:22:26,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742098_1274 (size=12001) 2024-12-04T15:22:26,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325806688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325806695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325806695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325806695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325806695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,893 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325806892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325806898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325806899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325806900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:26,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:26,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325806901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,072 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/d626e4e437ce4c65aefbcc92c063f3b8 2024-12-04T15:22:27,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/b8cd24914a1b4ba2a6aa502baea7e858 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b8cd24914a1b4ba2a6aa502baea7e858 2024-12-04T15:22:27,098 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b8cd24914a1b4ba2a6aa502baea7e858, entries=150, sequenceid=56, filesize=11.7 K 2024-12-04T15:22:27,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/af1fa52b46ff4c1895a9096360020367 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/af1fa52b46ff4c1895a9096360020367 2024-12-04T15:22:27,104 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/af1fa52b46ff4c1895a9096360020367, entries=150, sequenceid=56, filesize=11.7 K 2024-12-04T15:22:27,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/d626e4e437ce4c65aefbcc92c063f3b8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d626e4e437ce4c65aefbcc92c063f3b8 2024-12-04T15:22:27,113 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d626e4e437ce4c65aefbcc92c063f3b8, entries=150, sequenceid=56, filesize=11.7 K 2024-12-04T15:22:27,114 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for e068873424c7ff89600b835c0496bec4 in 555ms, sequenceid=56, compaction requested=true 2024-12-04T15:22:27,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:27,114 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:27,115 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:27,116 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/A is initiating minor compaction (all files) 2024-12-04T15:22:27,116 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/A in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:27,116 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/a1970203652c47279396439de167e009, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e23f20c1a6484c7b85f58d913c3d088a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b8cd24914a1b4ba2a6aa502baea7e858] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=35.2 K 2024-12-04T15:22:27,120 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1970203652c47279396439de167e009, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733325745060 2024-12-04T15:22:27,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:27,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:27,121 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:27,121 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e23f20c1a6484c7b85f58d913c3d088a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733325745086 
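The repeated "Over memstore limit=512.0 K" warnings above come from HRegion.checkResources, which rejects writes with RegionTooBusyException once a region's memstore passes its blocking size: the configured flush size multiplied by the block multiplier. The 512 K threshold suggests this test runs with a deliberately small flush size; the exact settings are not visible in this excerpt, so the values in the following sketch are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: reproduces a 512 K blocking limit like the one reported in this log.
// The real values used by this test run are not shown in the excerpt.
public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush at 128 K (default is 128 MB)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes past flushSize * 4

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 128 K * 4 = 512 K, the "Over memstore limit" threshold seen in the warnings above.
    System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
  }
}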
2024-12-04T15:22:27,122 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8cd24914a1b4ba2a6aa502baea7e858, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733325746245 2024-12-04T15:22:27,122 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:27,122 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/B is initiating minor compaction (all files) 2024-12-04T15:22:27,122 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/B in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:27,122 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/083f9630898b4a9197bd572e7c6b7b18, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/f73480c197344a48a2770fd071c1c46b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/af1fa52b46ff4c1895a9096360020367] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=35.2 K 2024-12-04T15:22:27,123 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 083f9630898b4a9197bd572e7c6b7b18, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733325745060 2024-12-04T15:22:27,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:27,124 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting f73480c197344a48a2770fd071c1c46b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733325745086 2024-12-04T15:22:27,125 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting af1fa52b46ff4c1895a9096360020367, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733325746245 2024-12-04T15:22:27,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:27,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:27,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:27,142 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#A#compaction#226 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:27,143 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/fd4695fb12424ccd95faed2402a332c4 is 50, key is test_row_0/A:col10/1733325746245/Put/seqid=0 2024-12-04T15:22:27,150 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#B#compaction#227 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:27,150 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/53646222ddbf4d79b3ca577cb5c32691 is 50, key is test_row_0/B:col10/1733325746245/Put/seqid=0 2024-12-04T15:22:27,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-04T15:22:27,156 INFO [Thread-1220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-04T15:22:27,157 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:22:27,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-12-04T15:22:27,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-04T15:22:27,160 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:22:27,160 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:22:27,161 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:22:27,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:27,199 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-04T15:22:27,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:27,200 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:27,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:27,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:27,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:27,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:27,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742099_1275 (size=12104) 2024-12-04T15:22:27,223 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/fd4695fb12424ccd95faed2402a332c4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/fd4695fb12424ccd95faed2402a332c4 2024-12-04T15:22:27,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325807216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325807219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325807221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325807222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325807223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742100_1276 (size=12104) 2024-12-04T15:22:27,236 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/53646222ddbf4d79b3ca577cb5c32691 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/53646222ddbf4d79b3ca577cb5c32691 2024-12-04T15:22:27,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/f0f7cea1c54c40a3a1069acb28f0bfbc is 50, key is test_row_0/A:col10/1733325747197/Put/seqid=0 2024-12-04T15:22:27,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-04T15:22:27,261 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/B of e068873424c7ff89600b835c0496bec4 into 53646222ddbf4d79b3ca577cb5c32691(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
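The "Exploring compaction algorithm has selected 3 files of size 36003" messages reflect the ratio-based minor-compaction check: a store file stays in the candidate set only while it is not too large relative to the other candidates. The sketch below illustrates that test in isolation, using the ~11.7 K flush files from this log and the stock ratio of 1.2; the actual ExploringCompactionPolicy also enforces min/max file counts and size limits, so this is a simplification.

import java.util.List;

// Simplified illustration of the ratio rule behind minor-compaction selection.
// Sizes approximate the three flush files in this log (3 x 12,001 = 36,003 bytes);
// 1.2 is the default hbase.hstore.compaction.ratio. The real policy applies
// further file-count and size constraints on top of this check.
public class RatioSelectionSketch {
  static boolean withinRatio(long fileSize, long totalSize, double ratio) {
    // A candidate qualifies while it is no larger than the rest of the set, scaled by the ratio.
    return fileSize <= (totalSize - fileSize) * ratio;
  }

  public static void main(String[] args) {
    List<Long> storeFiles = List.of(12_001L, 12_001L, 12_001L);
    long total = storeFiles.stream().mapToLong(Long::longValue).sum();
    for (long size : storeFiles) {
      System.out.printf("file of %,d bytes eligible: %b%n", size, withinRatio(size, total, 1.2));
    }
  }
}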
2024-12-04T15:22:27,261 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:27,261 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/B, priority=13, startTime=1733325747121; duration=0sec 2024-12-04T15:22:27,261 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:27,261 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:B 2024-12-04T15:22:27,261 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:27,262 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/A of e068873424c7ff89600b835c0496bec4 into fd4695fb12424ccd95faed2402a332c4(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:27,262 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:27,262 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/A, priority=13, startTime=1733325747114; duration=0sec 2024-12-04T15:22:27,262 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:27,262 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:A 2024-12-04T15:22:27,270 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:27,270 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/C is initiating minor compaction (all files) 2024-12-04T15:22:27,270 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/C in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:27,272 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/1d5ea073886547c9b56ec18696a379b6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b7094e4c1f104b4db540dcb7cc8d344c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d626e4e437ce4c65aefbcc92c063f3b8] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=35.2 K 2024-12-04T15:22:27,273 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d5ea073886547c9b56ec18696a379b6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733325745060 2024-12-04T15:22:27,274 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting b7094e4c1f104b4db540dcb7cc8d344c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733325745086 2024-12-04T15:22:27,274 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting d626e4e437ce4c65aefbcc92c063f3b8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733325746245 2024-12-04T15:22:27,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742101_1277 (size=12001) 2024-12-04T15:22:27,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/f0f7cea1c54c40a3a1069acb28f0bfbc 2024-12-04T15:22:27,304 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#C#compaction#229 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:27,305 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/d028aae7e55c4b4a9ac56e50d7398569 is 50, key is test_row_0/C:col10/1733325746245/Put/seqid=0 2024-12-04T15:22:27,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/dfdfab13ddd64851a74aea28470e2750 is 50, key is test_row_0/B:col10/1733325747197/Put/seqid=0 2024-12-04T15:22:27,316 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-04T15:22:27,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:27,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:27,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:27,317 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
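Sub-procedure pid=70 fails at this point because the region is already mid-flush ("NOT flushing ... as already flushing"), so FlushRegionCallable reports an IOException back to the master, which retries the dispatch a moment later (it reappears below and then proceeds). The sequence is driven by a client-side table flush; a minimal sketch of issuing one, assuming a reachable cluster and the table name from this test, follows.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of the client call behind the FLUSH procedures (pid=67/69) in this log.
// Assumes a reachable cluster; connection details are not part of the excerpt.
public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master runs a FlushTableProcedure and fans out FlushRegionProcedure
      // sub-procedures to the region servers; the call returns once they complete.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}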
2024-12-04T15:22:27,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:27,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:27,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325807325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742102_1278 (size=12104) 2024-12-04T15:22:27,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325807327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325807327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325807330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325807331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742103_1279 (size=12001) 2024-12-04T15:22:27,338 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/dfdfab13ddd64851a74aea28470e2750 2024-12-04T15:22:27,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/34eae6a915184651abe4c974a748cd68 is 50, key is test_row_0/C:col10/1733325747197/Put/seqid=0 2024-12-04T15:22:27,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742104_1280 (size=12001) 2024-12-04T15:22:27,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/34eae6a915184651abe4c974a748cd68 2024-12-04T15:22:27,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/f0f7cea1c54c40a3a1069acb28f0bfbc as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/f0f7cea1c54c40a3a1069acb28f0bfbc 2024-12-04T15:22:27,378 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/f0f7cea1c54c40a3a1069acb28f0bfbc, entries=150, sequenceid=75, filesize=11.7 K 2024-12-04T15:22:27,379 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/dfdfab13ddd64851a74aea28470e2750 as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/dfdfab13ddd64851a74aea28470e2750 2024-12-04T15:22:27,383 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/dfdfab13ddd64851a74aea28470e2750, entries=150, sequenceid=75, filesize=11.7 K 2024-12-04T15:22:27,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/34eae6a915184651abe4c974a748cd68 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/34eae6a915184651abe4c974a748cd68 2024-12-04T15:22:27,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/34eae6a915184651abe4c974a748cd68, entries=150, sequenceid=75, filesize=11.7 K 2024-12-04T15:22:27,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for e068873424c7ff89600b835c0496bec4 in 192ms, sequenceid=75, compaction requested=false 2024-12-04T15:22:27,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:27,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-04T15:22:27,470 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,470 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-04T15:22:27,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
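The writer threads in this test keep colliding with the blocking limit while the flushes catch up, which is why RegionTooBusyException keeps appearing; the HBase client normally absorbs these by retrying the mutation with backoff. A minimal writer sketch that leans on those retries is shown below; the retry count and pause are illustrative values, not taken from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a writer that tolerates RegionTooBusyException by relying on the
// client's built-in retry/backoff. Retry and pause values are illustrative.
public class BusyRegionWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 15); // retry budget per operation
    conf.setLong("hbase.client.pause", 100);        // base backoff in milliseconds

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // If the region's memstore is over its blocking limit, the server answers with
      // RegionTooBusyException and the client retries with backoff until the budget runs out.
      table.put(put);
    }
  }
}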
2024-12-04T15:22:27,471 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-04T15:22:27,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:27,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:27,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:27,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:27,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:27,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:27,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/c2c9b90a98304636b1bd1ab15fb8ffe8 is 50, key is test_row_0/A:col10/1733325747217/Put/seqid=0 2024-12-04T15:22:27,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742105_1281 (size=12001) 2024-12-04T15:22:27,516 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/c2c9b90a98304636b1bd1ab15fb8ffe8 2024-12-04T15:22:27,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/97d470e6a13a49edb2d3e01a396fab22 is 50, key is test_row_0/B:col10/1733325747217/Put/seqid=0 2024-12-04T15:22:27,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
as already flushing 2024-12-04T15:22:27,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:27,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742106_1282 (size=12001) 2024-12-04T15:22:27,545 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/97d470e6a13a49edb2d3e01a396fab22 2024-12-04T15:22:27,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325807546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325807547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325807547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325807551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325807552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/2730ceb39a3947ecbbad074a99dfdff1 is 50, key is test_row_0/C:col10/1733325747217/Put/seqid=0 2024-12-04T15:22:27,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742107_1283 (size=12001) 2024-12-04T15:22:27,583 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/2730ceb39a3947ecbbad074a99dfdff1 2024-12-04T15:22:27,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/c2c9b90a98304636b1bd1ab15fb8ffe8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c2c9b90a98304636b1bd1ab15fb8ffe8 2024-12-04T15:22:27,608 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c2c9b90a98304636b1bd1ab15fb8ffe8, entries=150, sequenceid=94, filesize=11.7 K 2024-12-04T15:22:27,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/97d470e6a13a49edb2d3e01a396fab22 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/97d470e6a13a49edb2d3e01a396fab22 2024-12-04T15:22:27,615 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 
{event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/97d470e6a13a49edb2d3e01a396fab22, entries=150, sequenceid=94, filesize=11.7 K 2024-12-04T15:22:27,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/2730ceb39a3947ecbbad074a99dfdff1 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2730ceb39a3947ecbbad074a99dfdff1 2024-12-04T15:22:27,623 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2730ceb39a3947ecbbad074a99dfdff1, entries=150, sequenceid=94, filesize=11.7 K 2024-12-04T15:22:27,625 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for e068873424c7ff89600b835c0496bec4 in 154ms, sequenceid=94, compaction requested=true 2024-12-04T15:22:27,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:27,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:27,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-04T15:22:27,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-04T15:22:27,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-04T15:22:27,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 467 msec 2024-12-04T15:22:27,631 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 473 msec 2024-12-04T15:22:27,657 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-04T15:22:27,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:27,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:27,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:27,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:27,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:27,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:27,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:27,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/c3e8b544879a4ae5b2a7da4349bb93aa is 50, key is test_row_0/A:col10/1733325747546/Put/seqid=0 2024-12-04T15:22:27,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325807679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325807680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325807680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325807682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325807683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742108_1284 (size=14341) 2024-12-04T15:22:27,712 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/c3e8b544879a4ae5b2a7da4349bb93aa 2024-12-04T15:22:27,723 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/f0d4080c8bd3403881718742d80f6cd5 is 50, key is test_row_0/B:col10/1733325747546/Put/seqid=0 2024-12-04T15:22:27,745 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/d028aae7e55c4b4a9ac56e50d7398569 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d028aae7e55c4b4a9ac56e50d7398569 2024-12-04T15:22:27,756 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/C of e068873424c7ff89600b835c0496bec4 into d028aae7e55c4b4a9ac56e50d7398569(size=11.8 K), total size for store is 35.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:27,756 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:27,756 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/C, priority=13, startTime=1733325747126; duration=0sec 2024-12-04T15:22:27,756 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:27,756 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:C 2024-12-04T15:22:27,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-04T15:22:27,763 INFO [Thread-1220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-12-04T15:22:27,764 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:22:27,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-12-04T15:22:27,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-04T15:22:27,766 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:22:27,767 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:22:27,767 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:22:27,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742109_1285 (size=12001) 2024-12-04T15:22:27,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325807788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325807789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325807789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325807789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325807790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-04T15:22:27,920 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,920 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-04T15:22:27,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:27,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:27,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:27,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:27,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:27,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:27,928 DEBUG [master/645c2dbfef2e:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 369c833ab6e0e1ae5f4d743d2988012a changed from -1.0 to 0.0, refreshing cache 2024-12-04T15:22:27,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325807992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325807993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:27,996 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:27,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325807994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325808004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325808004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-04T15:22:28,073 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-04T15:22:28,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:28,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:28,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:28,074 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:28,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:28,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:28,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/f0d4080c8bd3403881718742d80f6cd5 2024-12-04T15:22:28,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/ad742e9ecc0c4883959b88ea9fdc0957 is 50, key is test_row_0/C:col10/1733325747546/Put/seqid=0 2024-12-04T15:22:28,184 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-04T15:22:28,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742110_1286 (size=12001) 2024-12-04T15:22:28,204 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/ad742e9ecc0c4883959b88ea9fdc0957 2024-12-04T15:22:28,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/c3e8b544879a4ae5b2a7da4349bb93aa as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c3e8b544879a4ae5b2a7da4349bb93aa 2024-12-04T15:22:28,216 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c3e8b544879a4ae5b2a7da4349bb93aa, entries=200, sequenceid=115, filesize=14.0 K 2024-12-04T15:22:28,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/f0d4080c8bd3403881718742d80f6cd5 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/f0d4080c8bd3403881718742d80f6cd5 2024-12-04T15:22:28,222 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/f0d4080c8bd3403881718742d80f6cd5, entries=150, sequenceid=115, filesize=11.7 K 2024-12-04T15:22:28,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/ad742e9ecc0c4883959b88ea9fdc0957 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/ad742e9ecc0c4883959b88ea9fdc0957 2024-12-04T15:22:28,228 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-04T15:22:28,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:28,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:28,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:28,229 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:28,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:28,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:28,231 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/ad742e9ecc0c4883959b88ea9fdc0957, entries=150, sequenceid=115, filesize=11.7 K 2024-12-04T15:22:28,232 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for e068873424c7ff89600b835c0496bec4 in 575ms, sequenceid=115, compaction requested=true 2024-12-04T15:22:28,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:28,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:28,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:28,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:28,232 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:22:28,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:28,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:28,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:22:28,232 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:22:28,234 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50447 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:22:28,234 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 
48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:22:28,234 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/A is initiating minor compaction (all files) 2024-12-04T15:22:28,234 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/B is initiating minor compaction (all files) 2024-12-04T15:22:28,234 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/A in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:28,234 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/B in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:28,234 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/fd4695fb12424ccd95faed2402a332c4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/f0f7cea1c54c40a3a1069acb28f0bfbc, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c2c9b90a98304636b1bd1ab15fb8ffe8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c3e8b544879a4ae5b2a7da4349bb93aa] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=49.3 K 2024-12-04T15:22:28,234 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/53646222ddbf4d79b3ca577cb5c32691, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/dfdfab13ddd64851a74aea28470e2750, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/97d470e6a13a49edb2d3e01a396fab22, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/f0d4080c8bd3403881718742d80f6cd5] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=47.0 K 2024-12-04T15:22:28,235 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53646222ddbf4d79b3ca577cb5c32691, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733325746245 2024-12-04T15:22:28,235 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting fd4695fb12424ccd95faed2402a332c4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733325746245 
2024-12-04T15:22:28,235 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfdfab13ddd64851a74aea28470e2750, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733325746579 2024-12-04T15:22:28,235 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting f0f7cea1c54c40a3a1069acb28f0bfbc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733325746579 2024-12-04T15:22:28,236 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97d470e6a13a49edb2d3e01a396fab22, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733325747217 2024-12-04T15:22:28,236 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting c2c9b90a98304636b1bd1ab15fb8ffe8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733325747217 2024-12-04T15:22:28,236 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0d4080c8bd3403881718742d80f6cd5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733325747546 2024-12-04T15:22:28,236 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting c3e8b544879a4ae5b2a7da4349bb93aa, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733325747546 2024-12-04T15:22:28,251 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#A#compaction#238 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:28,254 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/55a202e27cf14bb1bda23e99439c5126 is 50, key is test_row_0/A:col10/1733325747546/Put/seqid=0 2024-12-04T15:22:28,255 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#B#compaction#239 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:28,256 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/5b8e8a92e4b34df2b5c8309121e37125 is 50, key is test_row_0/B:col10/1733325747546/Put/seqid=0 2024-12-04T15:22:28,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742111_1287 (size=12241) 2024-12-04T15:22:28,269 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/55a202e27cf14bb1bda23e99439c5126 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/55a202e27cf14bb1bda23e99439c5126 2024-12-04T15:22:28,276 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e068873424c7ff89600b835c0496bec4/A of e068873424c7ff89600b835c0496bec4 into 55a202e27cf14bb1bda23e99439c5126(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:28,276 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:28,276 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/A, priority=12, startTime=1733325748232; duration=0sec 2024-12-04T15:22:28,277 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:28,277 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:A 2024-12-04T15:22:28,277 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:22:28,279 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:22:28,279 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/C is initiating minor compaction (all files) 2024-12-04T15:22:28,279 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/C in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:28,279 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d028aae7e55c4b4a9ac56e50d7398569, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/34eae6a915184651abe4c974a748cd68, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2730ceb39a3947ecbbad074a99dfdff1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/ad742e9ecc0c4883959b88ea9fdc0957] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=47.0 K 2024-12-04T15:22:28,280 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting d028aae7e55c4b4a9ac56e50d7398569, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733325746245 2024-12-04T15:22:28,280 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 34eae6a915184651abe4c974a748cd68, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733325746579 2024-12-04T15:22:28,281 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 2730ceb39a3947ecbbad074a99dfdff1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733325747217 2024-12-04T15:22:28,281 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting ad742e9ecc0c4883959b88ea9fdc0957, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733325747546 2024-12-04T15:22:28,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742112_1288 (size=12241) 2024-12-04T15:22:28,300 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#C#compaction#240 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:28,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:28,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-04T15:22:28,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:28,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:28,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:28,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:28,301 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/ab8c67397b1f410aafdcf80a5694e8cb is 50, key is test_row_0/C:col10/1733325747546/Put/seqid=0 2024-12-04T15:22:28,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:28,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:28,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742113_1289 (size=12241) 2024-12-04T15:22:28,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/0e1e2f99f3854e27904b0e96c9cb0ab3 is 50, key is test_row_0/A:col10/1733325747680/Put/seqid=0 2024-12-04T15:22:28,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742114_1290 (size=12101) 2024-12-04T15:22:28,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-04T15:22:28,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325808369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325808369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325808376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325808377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325808378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,382 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,383 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-04T15:22:28,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:28,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:28,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:28,383 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:28,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:28,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:28,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325808479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325808480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325808481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325808483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325808484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,535 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,536 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-04T15:22:28,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:28,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:28,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:28,536 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:28,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:28,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:28,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325808683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,689 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,689 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-04T15:22:28,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:28,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:28,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:28,689 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:28,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:28,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:28,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325808688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325808688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325808689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325808691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,706 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/5b8e8a92e4b34df2b5c8309121e37125 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/5b8e8a92e4b34df2b5c8309121e37125 2024-12-04T15:22:28,720 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e068873424c7ff89600b835c0496bec4/B of e068873424c7ff89600b835c0496bec4 into 5b8e8a92e4b34df2b5c8309121e37125(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:28,720 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4:
2024-12-04T15:22:28,720 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/B, priority=12, startTime=1733325748232; duration=0sec
2024-12-04T15:22:28,720 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-04T15:22:28,720 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:B
2024-12-04T15:22:28,726 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/ab8c67397b1f410aafdcf80a5694e8cb as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/ab8c67397b1f410aafdcf80a5694e8cb
2024-12-04T15:22:28,734 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e068873424c7ff89600b835c0496bec4/C of e068873424c7ff89600b835c0496bec4 into ab8c67397b1f410aafdcf80a5694e8cb(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-04T15:22:28,734 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4:
2024-12-04T15:22:28,734 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/C, priority=12, startTime=1733325748232; duration=0sec
2024-12-04T15:22:28,734 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-04T15:22:28,734 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:C
2024-12-04T15:22:28,760 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/0e1e2f99f3854e27904b0e96c9cb0ab3
2024-12-04T15:22:28,779 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/b167aeacb9f3424abf247844cccb94e2 is 50, key is test_row_0/B:col10/1733325747680/Put/seqid=0
2024-12-04T15:22:28,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742115_1291 (size=12101)
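The RegionTooBusyException entries above record client Mutate calls being rejected while the region's memstore is over its 512.0 K blocking limit and a flush is still in progress. As a rough, hypothetical sketch (this is not code from the test; the table name matches the log but the row and value are placeholders), a caller using the standard HBase client API could back off and retry such writes:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Hypothetical write; the log above shows rows like test_row_0 in families A/B/C.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          // Rejected with RegionTooBusyException while the memstore is over its blocking limit.
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          // Wait for the in-flight flush to drain the memstore, then retry with a larger delay.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}

In practice the stock HBase client already retries RegionTooBusyException with its own backoff, so an explicit loop like this is rarely needed; it is shown only to make the failure mode in the log concrete.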
2024-12-04T15:22:28,843 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-04T15:22:28,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:28,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:28,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:28,844 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:28,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
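For context on the repeated "Over memstore limit=512.0 K" rejections: a region blocks new updates once its memstore reaches hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier. A 512 K ceiling is consistent with a deliberately small flush size, for example 128 K combined with the default multiplier of 4, as acid-guarantee tests commonly configure; the exact settings used by this run are not visible in this excerpt. A minimal sketch with assumed values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values for illustration: flush each store's memstore at 128 K ...
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // ... and block new updates once the region's memstore reaches 4x that size (512 K),
    // which would yield RegionTooBusyException messages like the ones in this log.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Updates block at roughly " + (blockingLimit / 1024) + " K per region");
  }
}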
2024-12-04T15:22:28,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:28,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-04T15:22:28,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325808987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325808992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325808993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325808993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:28,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325808994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,996 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:28,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-04T15:22:28,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:28,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:28,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:28,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:28,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:29,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:29,151 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:29,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-04T15:22:29,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:29,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:29,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:29,152 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:29,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:29,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:29,212 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/b167aeacb9f3424abf247844cccb94e2 2024-12-04T15:22:29,224 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/eda26424d85b46e4b6f6334442a47f5b is 50, key is test_row_0/C:col10/1733325747680/Put/seqid=0 2024-12-04T15:22:29,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742116_1292 (size=12101) 2024-12-04T15:22:29,259 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/eda26424d85b46e4b6f6334442a47f5b 2024-12-04T15:22:29,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/0e1e2f99f3854e27904b0e96c9cb0ab3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/0e1e2f99f3854e27904b0e96c9cb0ab3 2024-12-04T15:22:29,272 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/0e1e2f99f3854e27904b0e96c9cb0ab3, entries=150, sequenceid=133, filesize=11.8 K 2024-12-04T15:22:29,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/b167aeacb9f3424abf247844cccb94e2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/b167aeacb9f3424abf247844cccb94e2 2024-12-04T15:22:29,278 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/b167aeacb9f3424abf247844cccb94e2, entries=150, sequenceid=133, filesize=11.8 K 2024-12-04T15:22:29,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/eda26424d85b46e4b6f6334442a47f5b as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/eda26424d85b46e4b6f6334442a47f5b 2024-12-04T15:22:29,283 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/eda26424d85b46e4b6f6334442a47f5b, entries=150, sequenceid=133, filesize=11.8 K 2024-12-04T15:22:29,288 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for e068873424c7ff89600b835c0496bec4 in 988ms, sequenceid=133, compaction requested=false 2024-12-04T15:22:29,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:29,308 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:29,308 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-04T15:22:29,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:29,308 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-04T15:22:29,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:29,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:29,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:29,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:29,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:29,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:29,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/02d4c81b682b44e882d69a7bcb93fc2c is 50, key is test_row_0/A:col10/1733325748376/Put/seqid=0 2024-12-04T15:22:29,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742117_1293 (size=12151) 2024-12-04T15:22:29,327 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/02d4c81b682b44e882d69a7bcb93fc2c 2024-12-04T15:22:29,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/d7085a756c0a4475a737e97cb98c3b3f is 50, key is test_row_0/B:col10/1733325748376/Put/seqid=0 2024-12-04T15:22:29,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742118_1294 (size=12151) 2024-12-04T15:22:29,355 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=155 (bloomFilter=true), 
to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/d7085a756c0a4475a737e97cb98c3b3f 2024-12-04T15:22:29,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/4c4d4dc4b4da4e01bf94078f334e0318 is 50, key is test_row_0/C:col10/1733325748376/Put/seqid=0 2024-12-04T15:22:29,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742119_1295 (size=12151) 2024-12-04T15:22:29,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:29,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:29,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:29,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325809505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:29,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:29,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325809506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:29,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:29,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325809507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:29,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:29,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325809508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:29,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:29,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325809508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:29,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:29,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325809610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:29,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:29,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325809610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:29,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:29,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325809610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:29,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:29,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325809611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325809611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:29,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:29,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325809812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:29,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:29,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325809813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:29,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:29,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325809814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:29,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:29,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325809815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:29,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:29,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325809815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:29,835 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/4c4d4dc4b4da4e01bf94078f334e0318 2024-12-04T15:22:29,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/02d4c81b682b44e882d69a7bcb93fc2c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/02d4c81b682b44e882d69a7bcb93fc2c 2024-12-04T15:22:29,846 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/02d4c81b682b44e882d69a7bcb93fc2c, entries=150, sequenceid=155, filesize=11.9 K 2024-12-04T15:22:29,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/d7085a756c0a4475a737e97cb98c3b3f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/d7085a756c0a4475a737e97cb98c3b3f 2024-12-04T15:22:29,851 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/d7085a756c0a4475a737e97cb98c3b3f, entries=150, sequenceid=155, filesize=11.9 K 2024-12-04T15:22:29,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/4c4d4dc4b4da4e01bf94078f334e0318 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/4c4d4dc4b4da4e01bf94078f334e0318 2024-12-04T15:22:29,857 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/4c4d4dc4b4da4e01bf94078f334e0318, entries=150, sequenceid=155, filesize=11.9 K 2024-12-04T15:22:29,858 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for e068873424c7ff89600b835c0496bec4 in 550ms, sequenceid=155, compaction requested=true 2024-12-04T15:22:29,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:29,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:29,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-04T15:22:29,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-04T15:22:29,861 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-04T15:22:29,861 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0920 sec 2024-12-04T15:22:29,864 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 2.0980 sec 2024-12-04T15:22:29,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-04T15:22:29,870 INFO [Thread-1220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-04T15:22:29,872 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:22:29,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-04T15:22:29,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-04T15:22:29,873 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:22:29,874 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:22:29,874 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:22:29,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-04T15:22:30,025 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,026 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-04T15:22:30,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:30,027 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-04T15:22:30,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:30,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:30,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:30,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:30,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:30,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:30,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/e6d515fc4d8948599992e690b428b476 is 50, key is test_row_0/A:col10/1733325749504/Put/seqid=0 2024-12-04T15:22:30,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742120_1296 (size=12151) 2024-12-04T15:22:30,048 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/e6d515fc4d8948599992e690b428b476 2024-12-04T15:22:30,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/74efb0f56232425ebe0216763370581e is 50, key is test_row_0/B:col10/1733325749504/Put/seqid=0 2024-12-04T15:22:30,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742121_1297 (size=12151) 2024-12-04T15:22:30,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:30,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
as already flushing 2024-12-04T15:22:30,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325810135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325810135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325810135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325810139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325810140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-04T15:22:30,242 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325810241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325810241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325810242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325810244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325810245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325810445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325810445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325810446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325810447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325810448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-04T15:22:30,486 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/74efb0f56232425ebe0216763370581e 2024-12-04T15:22:30,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/2c119466f513469f98c7235117b8f873 is 50, key is test_row_0/C:col10/1733325749504/Put/seqid=0 2024-12-04T15:22:30,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742122_1298 (size=12151) 2024-12-04T15:22:30,500 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/2c119466f513469f98c7235117b8f873 2024-12-04T15:22:30,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/e6d515fc4d8948599992e690b428b476 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e6d515fc4d8948599992e690b428b476 2024-12-04T15:22:30,508 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e6d515fc4d8948599992e690b428b476, entries=150, sequenceid=171, filesize=11.9 K 2024-12-04T15:22:30,509 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/74efb0f56232425ebe0216763370581e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/74efb0f56232425ebe0216763370581e 2024-12-04T15:22:30,522 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/74efb0f56232425ebe0216763370581e, entries=150, sequenceid=171, filesize=11.9 K 2024-12-04T15:22:30,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/2c119466f513469f98c7235117b8f873 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2c119466f513469f98c7235117b8f873 2024-12-04T15:22:30,528 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2c119466f513469f98c7235117b8f873, entries=150, sequenceid=171, filesize=11.9 K 2024-12-04T15:22:30,529 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for e068873424c7ff89600b835c0496bec4 in 503ms, sequenceid=171, compaction requested=true 2024-12-04T15:22:30,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:30,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:30,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-12-04T15:22:30,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-12-04T15:22:30,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-04T15:22:30,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 656 msec 2024-12-04T15:22:30,533 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 660 msec 2024-12-04T15:22:30,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:30,753 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-04T15:22:30,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:30,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:30,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:30,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:30,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:30,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:30,760 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/c0b28fc2fd264d26978a490620219428 is 50, key is test_row_0/A:col10/1733325750753/Put/seqid=0 2024-12-04T15:22:30,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742123_1299 (size=14541) 2024-12-04T15:22:30,766 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/c0b28fc2fd264d26978a490620219428 2024-12-04T15:22:30,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325810762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325810763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325810764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325810769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325810770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/1e8e48cd6ffa49b485ad37f00da0a673 is 50, key is test_row_0/B:col10/1733325750753/Put/seqid=0 2024-12-04T15:22:30,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742124_1300 (size=12151) 2024-12-04T15:22:30,793 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/1e8e48cd6ffa49b485ad37f00da0a673 2024-12-04T15:22:30,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/add500ba3243417d9cef7fb62d4d18fe is 50, key is test_row_0/C:col10/1733325750753/Put/seqid=0 2024-12-04T15:22:30,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742125_1301 (size=12151) 2024-12-04T15:22:30,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325810870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325810870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325810871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325810874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:30,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325810874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:30,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-04T15:22:30,976 INFO [Thread-1220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-04T15:22:30,978 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:22:30,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-04T15:22:30,979 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:22:30,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-04T15:22:30,980 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:22:30,980 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:22:31,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325811072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325811076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325811076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325811077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325811077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-04T15:22:31,132 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-04T15:22:31,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:31,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:31,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:31,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:31,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:31,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:31,219 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/add500ba3243417d9cef7fb62d4d18fe 2024-12-04T15:22:31,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/c0b28fc2fd264d26978a490620219428 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c0b28fc2fd264d26978a490620219428 2024-12-04T15:22:31,230 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c0b28fc2fd264d26978a490620219428, entries=200, sequenceid=194, filesize=14.2 K 2024-12-04T15:22:31,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/1e8e48cd6ffa49b485ad37f00da0a673 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1e8e48cd6ffa49b485ad37f00da0a673 2024-12-04T15:22:31,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1e8e48cd6ffa49b485ad37f00da0a673, entries=150, sequenceid=194, filesize=11.9 K 2024-12-04T15:22:31,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/add500ba3243417d9cef7fb62d4d18fe as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/add500ba3243417d9cef7fb62d4d18fe 2024-12-04T15:22:31,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/add500ba3243417d9cef7fb62d4d18fe, entries=150, sequenceid=194, filesize=11.9 K 2024-12-04T15:22:31,242 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for e068873424c7ff89600b835c0496bec4 in 489ms, sequenceid=194, compaction requested=true 2024-12-04T15:22:31,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:31,243 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-04T15:22:31,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:31,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:31,244 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-04T15:22:31,245 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 63185 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-04T15:22:31,245 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/A is initiating minor compaction (all files) 2024-12-04T15:22:31,245 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/A in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:31,245 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/55a202e27cf14bb1bda23e99439c5126, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/0e1e2f99f3854e27904b0e96c9cb0ab3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/02d4c81b682b44e882d69a7bcb93fc2c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e6d515fc4d8948599992e690b428b476, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c0b28fc2fd264d26978a490620219428] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=61.7 K 2024-12-04T15:22:31,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:31,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:31,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:31,245 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55a202e27cf14bb1bda23e99439c5126, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733325747546 2024-12-04T15:22:31,246 DEBUG 
[RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e1e2f99f3854e27904b0e96c9cb0ab3, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733325747680 2024-12-04T15:22:31,246 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60795 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-04T15:22:31,246 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/B is initiating minor compaction (all files) 2024-12-04T15:22:31,247 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/B in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:31,247 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/5b8e8a92e4b34df2b5c8309121e37125, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/b167aeacb9f3424abf247844cccb94e2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/d7085a756c0a4475a737e97cb98c3b3f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/74efb0f56232425ebe0216763370581e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1e8e48cd6ffa49b485ad37f00da0a673] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=59.4 K 2024-12-04T15:22:31,247 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02d4c81b682b44e882d69a7bcb93fc2c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733325748355 2024-12-04T15:22:31,247 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b8e8a92e4b34df2b5c8309121e37125, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733325747546 2024-12-04T15:22:31,247 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6d515fc4d8948599992e690b428b476, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733325749504 2024-12-04T15:22:31,248 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting b167aeacb9f3424abf247844cccb94e2, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733325747680 2024-12-04T15:22:31,248 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0b28fc2fd264d26978a490620219428, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733325750134 2024-12-04T15:22:31,248 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] 
compactions.Compactor(224): Compacting d7085a756c0a4475a737e97cb98c3b3f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733325748355 2024-12-04T15:22:31,249 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 74efb0f56232425ebe0216763370581e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733325749504 2024-12-04T15:22:31,250 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e8e48cd6ffa49b485ad37f00da0a673, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733325750134 2024-12-04T15:22:31,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:31,261 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#B#compaction#253 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:31,261 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/0270ea179f07423883bfd5cd0c672618 is 50, key is test_row_0/B:col10/1733325750753/Put/seqid=0 2024-12-04T15:22:31,267 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#A#compaction#254 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:31,267 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/d0df8282bf7541f8a70d574e6077dfc9 is 50, key is test_row_0/A:col10/1733325750753/Put/seqid=0 2024-12-04T15:22:31,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-04T15:22:31,286 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742126_1302 (size=12561) 2024-12-04T15:22:31,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-04T15:22:31,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
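The compaction entries above show the exploring policy selecting all five eligible store files for a minor compaction and the pressure-aware throughput controller capping compaction I/O at 50.00 MB/second. A minimal configuration sketch of the knobs that govern this behaviour follows; the key names are the usual HBase ones, but the values are illustrative rather than the ones used by this test and should be checked against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum/maximum number of store files the exploring policy will pick per compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Writes block once a store accumulates this many files (the "16 blocking" in the log).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    // Bounds used by the pressure-aware compaction throughput controller, in bytes/second.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}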
2024-12-04T15:22:31,287 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-04T15:22:31,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:31,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:31,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:31,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:31,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:31,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:31,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742127_1303 (size=12561) 2024-12-04T15:22:31,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/baef8709ce0148908f186e734a4584bb is 50, key is test_row_1/A:col10/1733325750762/Put/seqid=0 2024-12-04T15:22:31,318 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/d0df8282bf7541f8a70d574e6077dfc9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/d0df8282bf7541f8a70d574e6077dfc9 2024-12-04T15:22:31,325 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in e068873424c7ff89600b835c0496bec4/A of e068873424c7ff89600b835c0496bec4 into d0df8282bf7541f8a70d574e6077dfc9(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
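The CompactingMemStore / CompactionPipeline entries above reflect in-memory compaction being enabled on the table's column families. Below is a hedged sketch of declaring such a table through the client API: the table and family names mirror the log, but the choice of the BASIC policy is an assumption, not something the log states.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class InMemoryCompactionExample {
  public static void main(String[] args) {
    // Declare a table with three families, each using an in-memory compaction
    // policy so the region server backs them with a CompactingMemStore.
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    for (String family : new String[] { "A", "B", "C" }) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
              .build());
    }
    TableDescriptor desc = builder.build();
    System.out.println(desc);
  }
}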
2024-12-04T15:22:31,325 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:31,325 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/A, priority=11, startTime=1733325751243; duration=0sec 2024-12-04T15:22:31,326 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:31,326 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:A 2024-12-04T15:22:31,326 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-04T15:22:31,328 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60795 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-04T15:22:31,328 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/C is initiating minor compaction (all files) 2024-12-04T15:22:31,328 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/C in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:31,328 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/ab8c67397b1f410aafdcf80a5694e8cb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/eda26424d85b46e4b6f6334442a47f5b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/4c4d4dc4b4da4e01bf94078f334e0318, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2c119466f513469f98c7235117b8f873, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/add500ba3243417d9cef7fb62d4d18fe] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=59.4 K 2024-12-04T15:22:31,329 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab8c67397b1f410aafdcf80a5694e8cb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733325747546 2024-12-04T15:22:31,329 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting eda26424d85b46e4b6f6334442a47f5b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733325747680 
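Store A has just been compacted from five files into a single 12.3 K file and store C is queued next; these compactions were triggered by the flusher, but the same work can be requested explicitly through the Admin API. A minimal sketch, assuming a running cluster reachable from the client configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class ManualCompactionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Request a compaction of a single column family...
      admin.compact(table, Bytes.toBytes("C"));
      // ...or a major compaction of the whole table (rewrites every store file).
      admin.majorCompact(table);
    }
  }
}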
2024-12-04T15:22:31,329 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c4d4dc4b4da4e01bf94078f334e0318, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733325748355 2024-12-04T15:22:31,330 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c119466f513469f98c7235117b8f873, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733325749504 2024-12-04T15:22:31,330 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting add500ba3243417d9cef7fb62d4d18fe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733325750134 2024-12-04T15:22:31,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742128_1304 (size=9757) 2024-12-04T15:22:31,350 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/baef8709ce0148908f186e734a4584bb 2024-12-04T15:22:31,368 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#C#compaction#256 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:31,369 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/12181b27d1a2475e811570bd9780d781 is 50, key is test_row_0/C:col10/1733325750753/Put/seqid=0 2024-12-04T15:22:31,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:31,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:31,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/bb0d666ba5534cad987b22d25809bed4 is 50, key is test_row_1/B:col10/1733325750762/Put/seqid=0 2024-12-04T15:22:31,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325811399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325811400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325811402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325811403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325811404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742130_1306 (size=9757) 2024-12-04T15:22:31,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742129_1305 (size=12561) 2024-12-04T15:22:31,453 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/12181b27d1a2475e811570bd9780d781 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/12181b27d1a2475e811570bd9780d781 2024-12-04T15:22:31,473 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in e068873424c7ff89600b835c0496bec4/C of e068873424c7ff89600b835c0496bec4 into 12181b27d1a2475e811570bd9780d781(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:31,473 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:31,474 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/C, priority=11, startTime=1733325751245; duration=0sec 2024-12-04T15:22:31,475 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:31,475 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:C 2024-12-04T15:22:31,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325811505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325811505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325811508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325811510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325811510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-04T15:22:31,696 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/0270ea179f07423883bfd5cd0c672618 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/0270ea179f07423883bfd5cd0c672618 2024-12-04T15:22:31,701 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in e068873424c7ff89600b835c0496bec4/B of e068873424c7ff89600b835c0496bec4 into 0270ea179f07423883bfd5cd0c672618(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:31,701 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:31,701 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/B, priority=11, startTime=1733325751244; duration=0sec 2024-12-04T15:22:31,702 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:31,702 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:B 2024-12-04T15:22:31,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325811708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325811708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325811711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325811711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:31,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325811712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:31,831 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/bb0d666ba5534cad987b22d25809bed4 2024-12-04T15:22:31,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/32d0e08da1794cc4b5d4a3d6f18dc7b5 is 50, key is test_row_1/C:col10/1733325750762/Put/seqid=0 2024-12-04T15:22:31,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742131_1307 (size=9757) 2024-12-04T15:22:31,860 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/32d0e08da1794cc4b5d4a3d6f18dc7b5 2024-12-04T15:22:31,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/baef8709ce0148908f186e734a4584bb as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/baef8709ce0148908f186e734a4584bb 2024-12-04T15:22:31,874 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/baef8709ce0148908f186e734a4584bb, entries=100, sequenceid=207, filesize=9.5 K 2024-12-04T15:22:31,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/bb0d666ba5534cad987b22d25809bed4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/bb0d666ba5534cad987b22d25809bed4 2024-12-04T15:22:31,886 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/bb0d666ba5534cad987b22d25809bed4, entries=100, sequenceid=207, filesize=9.5 K 2024-12-04T15:22:31,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/32d0e08da1794cc4b5d4a3d6f18dc7b5 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/32d0e08da1794cc4b5d4a3d6f18dc7b5 2024-12-04T15:22:31,898 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/32d0e08da1794cc4b5d4a3d6f18dc7b5, entries=100, sequenceid=207, filesize=9.5 K 2024-12-04T15:22:31,900 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for e068873424c7ff89600b835c0496bec4 in 613ms, sequenceid=207, compaction requested=false 2024-12-04T15:22:31,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:31,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:31,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-04T15:22:31,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-04T15:22:31,903 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-04T15:22:31,903 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 921 msec 2024-12-04T15:22:31,904 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 925 msec 2024-12-04T15:22:32,022 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-04T15:22:32,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:32,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:32,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:32,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:32,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:32,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:32,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:32,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325812031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325812032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,037 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325812033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325812034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325812034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,042 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/d525fd91820c4ae3af422a77a7ab2e6c is 50, key is test_row_0/A:col10/1733325751402/Put/seqid=0 2024-12-04T15:22:32,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-04T15:22:32,083 INFO [Thread-1220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-04T15:22:32,085 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:22:32,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-04T15:22:32,087 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:22:32,088 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:22:32,088 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:22:32,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-04T15:22:32,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742132_1308 (size=12151) 2024-12-04T15:22:32,138 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325812136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325812137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325812138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325812139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325812139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-04T15:22:32,241 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:32,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:32,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:32,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:32,242 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:32,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:32,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:32,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325812339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325812340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325812344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325812345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325812345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-04T15:22:32,395 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:32,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:32,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:32,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:32,396 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:32,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:32,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:32,498 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/d525fd91820c4ae3af422a77a7ab2e6c 2024-12-04T15:22:32,518 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/c2e73d84b73b417ba79c51ebcb1447bc is 50, key is test_row_0/B:col10/1733325751402/Put/seqid=0 2024-12-04T15:22:32,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742133_1309 (size=12151) 2024-12-04T15:22:32,551 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:32,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:32,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:32,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:32,552 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:32,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:32,553 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/c2e73d84b73b417ba79c51ebcb1447bc 2024-12-04T15:22:32,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:32,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/6438228a917b4e0f9af30b7695dbe292 is 50, key is test_row_0/C:col10/1733325751402/Put/seqid=0 2024-12-04T15:22:32,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742134_1310 (size=12151) 2024-12-04T15:22:32,595 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/6438228a917b4e0f9af30b7695dbe292 2024-12-04T15:22:32,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/d525fd91820c4ae3af422a77a7ab2e6c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/d525fd91820c4ae3af422a77a7ab2e6c 2024-12-04T15:22:32,617 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/d525fd91820c4ae3af422a77a7ab2e6c, entries=150, sequenceid=236, filesize=11.9 K 2024-12-04T15:22:32,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/c2e73d84b73b417ba79c51ebcb1447bc as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/c2e73d84b73b417ba79c51ebcb1447bc 2024-12-04T15:22:32,633 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/c2e73d84b73b417ba79c51ebcb1447bc, entries=150, sequenceid=236, filesize=11.9 K 2024-12-04T15:22:32,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/6438228a917b4e0f9af30b7695dbe292 as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/6438228a917b4e0f9af30b7695dbe292 2024-12-04T15:22:32,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/6438228a917b4e0f9af30b7695dbe292, entries=150, sequenceid=236, filesize=11.9 K 2024-12-04T15:22:32,643 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for e068873424c7ff89600b835c0496bec4 in 621ms, sequenceid=236, compaction requested=true 2024-12-04T15:22:32,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:32,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:32,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:32,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:32,643 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:32,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:32,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:32,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:22:32,643 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:32,644 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:32,644 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/A is initiating minor compaction (all files) 2024-12-04T15:22:32,645 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/A in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:32,645 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/d0df8282bf7541f8a70d574e6077dfc9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/baef8709ce0148908f186e734a4584bb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/d525fd91820c4ae3af422a77a7ab2e6c] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=33.7 K 2024-12-04T15:22:32,645 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:32,645 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting d0df8282bf7541f8a70d574e6077dfc9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733325750134 2024-12-04T15:22:32,645 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/B is initiating minor compaction (all files) 2024-12-04T15:22:32,645 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/B in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:32,645 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/0270ea179f07423883bfd5cd0c672618, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/bb0d666ba5534cad987b22d25809bed4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/c2e73d84b73b417ba79c51ebcb1447bc] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=33.7 K 2024-12-04T15:22:32,645 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting baef8709ce0148908f186e734a4584bb, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733325750762 2024-12-04T15:22:32,646 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0270ea179f07423883bfd5cd0c672618, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733325750134 2024-12-04T15:22:32,646 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting d525fd91820c4ae3af422a77a7ab2e6c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733325751402 2024-12-04T15:22:32,646 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb0d666ba5534cad987b22d25809bed4, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733325750762 2024-12-04T15:22:32,647 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2e73d84b73b417ba79c51ebcb1447bc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733325751402 2024-12-04T15:22:32,648 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-04T15:22:32,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:32,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:32,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:32,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:32,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:32,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:32,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:32,656 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/4c18f0c4a6654f14a5086e59a7b5c419 is 50, key is test_row_0/A:col10/1733325752031/Put/seqid=0 2024-12-04T15:22:32,662 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#B#compaction#263 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:32,663 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/2fb1a01bf8184e47a02f687703adeecc is 50, key is test_row_0/B:col10/1733325751402/Put/seqid=0 2024-12-04T15:22:32,672 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#A#compaction#264 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:32,673 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/3737203768824adda04e867470f38033 is 50, key is test_row_0/A:col10/1733325751402/Put/seqid=0 2024-12-04T15:22:32,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-04T15:22:32,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325812690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325812694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325812696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325812698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325812698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742135_1311 (size=14541) 2024-12-04T15:22:32,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/4c18f0c4a6654f14a5086e59a7b5c419 2024-12-04T15:22:32,712 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,714 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:32,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:32,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:32,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:32,714 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:32,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:32,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:32,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742136_1312 (size=12663) 2024-12-04T15:22:32,740 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/2fb1a01bf8184e47a02f687703adeecc as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/2fb1a01bf8184e47a02f687703adeecc 2024-12-04T15:22:32,745 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/B of e068873424c7ff89600b835c0496bec4 into 2fb1a01bf8184e47a02f687703adeecc(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:32,745 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:32,745 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/B, priority=13, startTime=1733325752643; duration=0sec 2024-12-04T15:22:32,745 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:32,745 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:B 2024-12-04T15:22:32,745 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:32,747 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:32,748 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/C is initiating minor compaction (all files) 2024-12-04T15:22:32,748 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/C in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:32,748 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/12181b27d1a2475e811570bd9780d781, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/32d0e08da1794cc4b5d4a3d6f18dc7b5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/6438228a917b4e0f9af30b7695dbe292] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=33.7 K 2024-12-04T15:22:32,749 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12181b27d1a2475e811570bd9780d781, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733325750134 2024-12-04T15:22:32,749 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/7c45b3a23c854735939973d8a1922d95 is 50, key is test_row_0/B:col10/1733325752031/Put/seqid=0 2024-12-04T15:22:32,749 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32d0e08da1794cc4b5d4a3d6f18dc7b5, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733325750762 2024-12-04T15:22:32,750 DEBUG 
[RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6438228a917b4e0f9af30b7695dbe292, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733325751402 2024-12-04T15:22:32,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742137_1313 (size=12663) 2024-12-04T15:22:32,791 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/3737203768824adda04e867470f38033 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3737203768824adda04e867470f38033 2024-12-04T15:22:32,796 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#C#compaction#266 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:32,797 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/6567d044e0e14f89b80fe4df0fd71aa3 is 50, key is test_row_0/C:col10/1733325751402/Put/seqid=0 2024-12-04T15:22:32,799 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/A of e068873424c7ff89600b835c0496bec4 into 3737203768824adda04e867470f38033(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:32,799 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:32,799 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/A, priority=13, startTime=1733325752643; duration=0sec 2024-12-04T15:22:32,800 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:32,800 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:A 2024-12-04T15:22:32,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742139_1315 (size=12663) 2024-12-04T15:22:32,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325812801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325812803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325812804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325812805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:32,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325812806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,811 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/6567d044e0e14f89b80fe4df0fd71aa3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/6567d044e0e14f89b80fe4df0fd71aa3 2024-12-04T15:22:32,817 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/C of e068873424c7ff89600b835c0496bec4 into 6567d044e0e14f89b80fe4df0fd71aa3(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:32,817 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:32,817 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/C, priority=13, startTime=1733325752643; duration=0sec 2024-12-04T15:22:32,818 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:32,818 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:C 2024-12-04T15:22:32,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742138_1314 (size=12151) 2024-12-04T15:22:32,826 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/7c45b3a23c854735939973d8a1922d95 2024-12-04T15:22:32,844 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/809f138c9112457ebc7c797598f8ed28 is 50, key is test_row_0/C:col10/1733325752031/Put/seqid=0 2024-12-04T15:22:32,866 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:32,866 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:32,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:32,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:32,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:32,867 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:32,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:32,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:32,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742140_1316 (size=12151) 2024-12-04T15:22:33,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325813007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325813010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325813010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325813011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325813011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,019 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,023 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:33,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:33,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:33,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:33,024 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,177 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:33,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:33,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:33,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:33,178 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-04T15:22:33,276 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/809f138c9112457ebc7c797598f8ed28 2024-12-04T15:22:33,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/4c18f0c4a6654f14a5086e59a7b5c419 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/4c18f0c4a6654f14a5086e59a7b5c419 2024-12-04T15:22:33,292 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/4c18f0c4a6654f14a5086e59a7b5c419, entries=200, sequenceid=248, filesize=14.2 K 2024-12-04T15:22:33,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/7c45b3a23c854735939973d8a1922d95 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7c45b3a23c854735939973d8a1922d95 2024-12-04T15:22:33,299 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7c45b3a23c854735939973d8a1922d95, entries=150, sequenceid=248, filesize=11.9 K 2024-12-04T15:22:33,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/809f138c9112457ebc7c797598f8ed28 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/809f138c9112457ebc7c797598f8ed28 2024-12-04T15:22:33,305 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/809f138c9112457ebc7c797598f8ed28, entries=150, sequenceid=248, filesize=11.9 K 2024-12-04T15:22:33,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for e068873424c7ff89600b835c0496bec4 in 659ms, sequenceid=248, compaction requested=false 2024-12-04T15:22:33,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:33,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:33,315 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-04T15:22:33,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:33,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:33,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:33,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:33,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:33,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:33,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325813318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,322 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/e447320dedd74704b3f06e4dc6561d5a is 50, key is test_row_0/A:col10/1733325752686/Put/seqid=0 2024-12-04T15:22:33,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325813319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325813320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325813321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325813321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,333 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,333 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:33,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:33,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:33,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:33,334 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:33,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:33,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742141_1317 (size=17181) 2024-12-04T15:22:33,371 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/e447320dedd74704b3f06e4dc6561d5a 2024-12-04T15:22:33,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/115ee226df3841f8ac1e37bd1cacefd8 is 50, key is test_row_0/B:col10/1733325752686/Put/seqid=0 2024-12-04T15:22:33,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742142_1318 (size=12301) 2024-12-04T15:22:33,404 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/115ee226df3841f8ac1e37bd1cacefd8 2024-12-04T15:22:33,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325813424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325813425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325813425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325813426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325813427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/b4e3bcffb6394dc288f1d47c399fdde7 is 50, key is test_row_0/C:col10/1733325752686/Put/seqid=0 2024-12-04T15:22:33,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742143_1319 (size=12301) 2024-12-04T15:22:33,485 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,488 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:33,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:33,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:33,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:33,488 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,627 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-04T15:22:33,627 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-04T15:22:33,629 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325813629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325813629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325813629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325813630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325813631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,642 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,643 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:33,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:33,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:33,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:33,643 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,796 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:33,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:33,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:33,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:33,797 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,851 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/b4e3bcffb6394dc288f1d47c399fdde7 2024-12-04T15:22:33,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/e447320dedd74704b3f06e4dc6561d5a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e447320dedd74704b3f06e4dc6561d5a 2024-12-04T15:22:33,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e447320dedd74704b3f06e4dc6561d5a, entries=250, sequenceid=278, filesize=16.8 K 2024-12-04T15:22:33,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/115ee226df3841f8ac1e37bd1cacefd8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/115ee226df3841f8ac1e37bd1cacefd8 2024-12-04T15:22:33,869 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/115ee226df3841f8ac1e37bd1cacefd8, entries=150, 
sequenceid=278, filesize=12.0 K 2024-12-04T15:22:33,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/b4e3bcffb6394dc288f1d47c399fdde7 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b4e3bcffb6394dc288f1d47c399fdde7 2024-12-04T15:22:33,879 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b4e3bcffb6394dc288f1d47c399fdde7, entries=150, sequenceid=278, filesize=12.0 K 2024-12-04T15:22:33,880 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for e068873424c7ff89600b835c0496bec4 in 565ms, sequenceid=278, compaction requested=true 2024-12-04T15:22:33,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:33,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:33,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:33,880 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:33,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:33,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:33,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:33,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:22:33,880 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:33,882 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 44385 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:33,882 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:33,882 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] 
regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/A is initiating minor compaction (all files) 2024-12-04T15:22:33,882 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/B is initiating minor compaction (all files) 2024-12-04T15:22:33,882 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/B in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:33,882 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/A in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:33,882 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/2fb1a01bf8184e47a02f687703adeecc, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7c45b3a23c854735939973d8a1922d95, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/115ee226df3841f8ac1e37bd1cacefd8] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=36.2 K 2024-12-04T15:22:33,882 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3737203768824adda04e867470f38033, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/4c18f0c4a6654f14a5086e59a7b5c419, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e447320dedd74704b3f06e4dc6561d5a] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=43.3 K 2024-12-04T15:22:33,883 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 2fb1a01bf8184e47a02f687703adeecc, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733325751402 2024-12-04T15:22:33,883 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3737203768824adda04e867470f38033, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733325751402 2024-12-04T15:22:33,883 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c45b3a23c854735939973d8a1922d95, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733325752031 2024-12-04T15:22:33,883 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c18f0c4a6654f14a5086e59a7b5c419, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733325752031 
2024-12-04T15:22:33,884 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 115ee226df3841f8ac1e37bd1cacefd8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733325752686 2024-12-04T15:22:33,884 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e447320dedd74704b3f06e4dc6561d5a, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733325752686 2024-12-04T15:22:33,910 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#B#compaction#271 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:33,911 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/61ed864e38354ce9a2d0dfd7b8284e5a is 50, key is test_row_0/B:col10/1733325752686/Put/seqid=0 2024-12-04T15:22:33,915 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#A#compaction#272 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:33,916 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/3ce0b08e531a459a8c2c316c2b18c90e is 50, key is test_row_0/A:col10/1733325752686/Put/seqid=0 2024-12-04T15:22:33,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:33,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742145_1321 (size=12915) 2024-12-04T15:22:33,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742144_1320 (size=12915) 2024-12-04T15:22:33,938 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-04T15:22:33,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:33,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:33,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:33,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:33,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:33,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:33,944 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/16437fec2c104bb6b0768571fdb27c25 is 50, key is test_row_0/A:col10/1733325753938/Put/seqid=0 2024-12-04T15:22:33,945 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/61ed864e38354ce9a2d0dfd7b8284e5a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/61ed864e38354ce9a2d0dfd7b8284e5a 2024-12-04T15:22:33,949 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:33,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:33,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:33,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:33,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742146_1322 (size=12301) 2024-12-04T15:22:33,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:33,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,951 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/16437fec2c104bb6b0768571fdb27c25 2024-12-04T15:22:33,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:33,953 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/B of e068873424c7ff89600b835c0496bec4 into 61ed864e38354ce9a2d0dfd7b8284e5a(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:33,953 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:33,953 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/B, priority=13, startTime=1733325753880; duration=0sec 2024-12-04T15:22:33,953 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:33,953 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:B 2024-12-04T15:22:33,953 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:33,957 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:33,957 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/C is initiating minor compaction (all files) 2024-12-04T15:22:33,957 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/C in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:33,957 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/6567d044e0e14f89b80fe4df0fd71aa3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/809f138c9112457ebc7c797598f8ed28, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b4e3bcffb6394dc288f1d47c399fdde7] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=36.2 K 2024-12-04T15:22:33,958 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6567d044e0e14f89b80fe4df0fd71aa3, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733325751402 2024-12-04T15:22:33,958 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 809f138c9112457ebc7c797598f8ed28, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733325752031 2024-12-04T15:22:33,959 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting b4e3bcffb6394dc288f1d47c399fdde7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733325752686 2024-12-04T15:22:33,961 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/7a04b2eb60814753be278caa6c708614 is 50, key is test_row_0/B:col10/1733325753938/Put/seqid=0 2024-12-04T15:22:33,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742147_1323 (size=12301) 2024-12-04T15:22:33,966 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/7a04b2eb60814753be278caa6c708614 2024-12-04T15:22:33,968 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#C#compaction#275 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:33,968 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/f4df746e73054e699bf6f3dfd121b1e5 is 50, key is test_row_0/C:col10/1733325752686/Put/seqid=0 2024-12-04T15:22:33,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742148_1324 (size=12915) 2024-12-04T15:22:33,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325813970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,978 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325813972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,979 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/f4df746e73054e699bf6f3dfd121b1e5 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/f4df746e73054e699bf6f3dfd121b1e5 2024-12-04T15:22:33,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325813974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325813979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:33,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325813980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:33,985 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/C of e068873424c7ff89600b835c0496bec4 into f4df746e73054e699bf6f3dfd121b1e5(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:33,985 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:33,985 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/C, priority=13, startTime=1733325753880; duration=0sec 2024-12-04T15:22:33,985 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:33,985 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:C 2024-12-04T15:22:33,997 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/d52657b9096d4ecd853c5c3076763c7b is 50, key is test_row_0/C:col10/1733325753938/Put/seqid=0 2024-12-04T15:22:34,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742149_1325 (size=12301) 2024-12-04T15:22:34,041 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/d52657b9096d4ecd853c5c3076763c7b 2024-12-04T15:22:34,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/16437fec2c104bb6b0768571fdb27c25 as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/16437fec2c104bb6b0768571fdb27c25 2024-12-04T15:22:34,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/16437fec2c104bb6b0768571fdb27c25, entries=150, sequenceid=291, filesize=12.0 K 2024-12-04T15:22:34,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/7a04b2eb60814753be278caa6c708614 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7a04b2eb60814753be278caa6c708614 2024-12-04T15:22:34,081 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7a04b2eb60814753be278caa6c708614, entries=150, sequenceid=291, filesize=12.0 K 2024-12-04T15:22:34,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/d52657b9096d4ecd853c5c3076763c7b as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d52657b9096d4ecd853c5c3076763c7b 2024-12-04T15:22:34,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:34,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325814078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:34,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325814079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:34,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325814080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,087 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d52657b9096d4ecd853c5c3076763c7b, entries=150, sequenceid=291, filesize=12.0 K 2024-12-04T15:22:34,088 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for e068873424c7ff89600b835c0496bec4 in 153ms, sequenceid=291, compaction requested=false 2024-12-04T15:22:34,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:34,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:34,095 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-04T15:22:34,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:34,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:34,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:34,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:34,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:34,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:34,103 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,108 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:34,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:34,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:34,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:34,109 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:34,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:34,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:34,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/8c61e042cb6d443c8a5cbc950476e3bc is 50, key is test_row_0/A:col10/1733325754094/Put/seqid=0 2024-12-04T15:22:34,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742150_1326 (size=17181) 2024-12-04T15:22:34,152 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:34,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325814147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:34,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325814150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-04T15:22:34,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:34,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325814253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:34,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325814254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,260 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:34,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:34,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:34,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:34,261 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:34,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:34,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:34,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:34,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325814284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:34,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325814284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:34,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325814287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,342 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/3ce0b08e531a459a8c2c316c2b18c90e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3ce0b08e531a459a8c2c316c2b18c90e 2024-12-04T15:22:34,360 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/A of e068873424c7ff89600b835c0496bec4 into 3ce0b08e531a459a8c2c316c2b18c90e(size=12.6 K), total size for store is 24.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:34,360 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:34,360 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/A, priority=13, startTime=1733325753880; duration=0sec 2024-12-04T15:22:34,360 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:34,361 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:A 2024-12-04T15:22:34,412 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:34,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:34,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:34,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:34,413 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:34,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:34,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:34,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:34,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325814458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:34,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325814458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,549 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/8c61e042cb6d443c8a5cbc950476e3bc 2024-12-04T15:22:34,558 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/1a4e5487befd4035b98886cc2a7b5bab is 50, key is test_row_0/B:col10/1733325754094/Put/seqid=0 2024-12-04T15:22:34,566 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:34,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:34,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:34,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:34,567 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:34,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:34,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:34,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:34,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325814591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:34,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325814591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:34,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325814592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742151_1327 (size=12301) 2024-12-04T15:22:34,720 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:34,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:34,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:34,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:34,721 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:34,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:34,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:34,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:34,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325814760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:34,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325814763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,874 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:34,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:34,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:34,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:34,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:34,874 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:34,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:34,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:35,027 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/1a4e5487befd4035b98886cc2a7b5bab 2024-12-04T15:22:35,029 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:35,030 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:35,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:35,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:35,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:35,030 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:35,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:35,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:35,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/aa82b2456f204f2bbfbf575d25ae0d90 is 50, key is test_row_0/C:col10/1733325754094/Put/seqid=0 2024-12-04T15:22:35,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742152_1328 (size=12301) 2024-12-04T15:22:35,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:35,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325815094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:35,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:35,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325815096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:35,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:35,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325815096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:35,187 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:35,187 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:35,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:35,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:35,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:35,188 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:35,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:35,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:35,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:35,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325815285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:35,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:35,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325815286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:35,341 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:35,342 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:35,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:35,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:35,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:35,342 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:35,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:35,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:35,460 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/aa82b2456f204f2bbfbf575d25ae0d90 2024-12-04T15:22:35,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/8c61e042cb6d443c8a5cbc950476e3bc as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/8c61e042cb6d443c8a5cbc950476e3bc 2024-12-04T15:22:35,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/8c61e042cb6d443c8a5cbc950476e3bc, entries=250, sequenceid=318, filesize=16.8 K 2024-12-04T15:22:35,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/1a4e5487befd4035b98886cc2a7b5bab as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1a4e5487befd4035b98886cc2a7b5bab 2024-12-04T15:22:35,483 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1a4e5487befd4035b98886cc2a7b5bab, entries=150, sequenceid=318, filesize=12.0 K 2024-12-04T15:22:35,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/aa82b2456f204f2bbfbf575d25ae0d90 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/aa82b2456f204f2bbfbf575d25ae0d90 2024-12-04T15:22:35,490 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/aa82b2456f204f2bbfbf575d25ae0d90, entries=150, sequenceid=318, filesize=12.0 K 2024-12-04T15:22:35,491 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for e068873424c7ff89600b835c0496bec4 in 1395ms, sequenceid=318, compaction requested=true 2024-12-04T15:22:35,491 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:35,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:35,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:35,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:35,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:35,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:35,491 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:35,491 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:35,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:35,492 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42397 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:35,492 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:35,492 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/B is initiating minor compaction (all files) 2024-12-04T15:22:35,492 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/A is initiating minor compaction (all files) 2024-12-04T15:22:35,492 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/A in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:35,493 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3ce0b08e531a459a8c2c316c2b18c90e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/16437fec2c104bb6b0768571fdb27c25, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/8c61e042cb6d443c8a5cbc950476e3bc] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=41.4 K 2024-12-04T15:22:35,493 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/B in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:35,493 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/61ed864e38354ce9a2d0dfd7b8284e5a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7a04b2eb60814753be278caa6c708614, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1a4e5487befd4035b98886cc2a7b5bab] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=36.6 K 2024-12-04T15:22:35,493 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ce0b08e531a459a8c2c316c2b18c90e, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733325752686 2024-12-04T15:22:35,493 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 61ed864e38354ce9a2d0dfd7b8284e5a, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733325752686 2024-12-04T15:22:35,494 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16437fec2c104bb6b0768571fdb27c25, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733325753320 2024-12-04T15:22:35,494 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a04b2eb60814753be278caa6c708614, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733325753320 2024-12-04T15:22:35,494 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c61e042cb6d443c8a5cbc950476e3bc, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733325753941 2024-12-04T15:22:35,495 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:35,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-04T15:22:35,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:35,496 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-04T15:22:35,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:35,496 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a4e5487befd4035b98886cc2a7b5bab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733325753941 2024-12-04T15:22:35,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:35,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:35,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:35,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:35,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:35,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/9ed14754d956440ba110ebf54f14e8e8 is 50, key is test_row_0/A:col10/1733325754146/Put/seqid=0 2024-12-04T15:22:35,514 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#B#compaction#281 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:35,515 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/7be6ba45d3e44c6b99bfb5418209d9a3 is 50, key is test_row_0/B:col10/1733325754094/Put/seqid=0 2024-12-04T15:22:35,526 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#A#compaction#282 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:35,527 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/269f46413f474f4291bc6e6fe2da5817 is 50, key is test_row_0/A:col10/1733325754094/Put/seqid=0 2024-12-04T15:22:35,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742153_1329 (size=12301) 2024-12-04T15:22:35,534 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/9ed14754d956440ba110ebf54f14e8e8 2024-12-04T15:22:35,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/1dec40a95be640ac86902523585a1ef3 is 50, key is test_row_0/B:col10/1733325754146/Put/seqid=0 2024-12-04T15:22:35,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742154_1330 (size=13017) 2024-12-04T15:22:35,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742155_1331 (size=13017) 2024-12-04T15:22:35,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742156_1332 (size=12301) 2024-12-04T15:22:35,596 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/1dec40a95be640ac86902523585a1ef3 2024-12-04T15:22:35,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/c783de7dd83d4f958317dde70ca6ee16 is 50, key is test_row_0/C:col10/1733325754146/Put/seqid=0 2024-12-04T15:22:35,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742157_1333 (size=12301) 2024-12-04T15:22:35,621 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/c783de7dd83d4f958317dde70ca6ee16 2024-12-04T15:22:35,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/9ed14754d956440ba110ebf54f14e8e8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/9ed14754d956440ba110ebf54f14e8e8 2024-12-04T15:22:35,631 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/9ed14754d956440ba110ebf54f14e8e8, entries=150, sequenceid=330, filesize=12.0 K 2024-12-04T15:22:35,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/1dec40a95be640ac86902523585a1ef3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1dec40a95be640ac86902523585a1ef3 2024-12-04T15:22:35,639 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1dec40a95be640ac86902523585a1ef3, entries=150, sequenceid=330, filesize=12.0 K 2024-12-04T15:22:35,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/c783de7dd83d4f958317dde70ca6ee16 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/c783de7dd83d4f958317dde70ca6ee16 2024-12-04T15:22:35,648 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/c783de7dd83d4f958317dde70ca6ee16, entries=150, sequenceid=330, filesize=12.0 K 2024-12-04T15:22:35,649 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for e068873424c7ff89600b835c0496bec4 in 153ms, sequenceid=330, compaction requested=true 2024-12-04T15:22:35,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:35,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:35,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-04T15:22:35,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-04T15:22:35,653 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-04T15:22:35,653 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.5620 sec 2024-12-04T15:22:35,655 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 3.5700 sec 2024-12-04T15:22:35,967 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/7be6ba45d3e44c6b99bfb5418209d9a3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7be6ba45d3e44c6b99bfb5418209d9a3 2024-12-04T15:22:35,973 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/B of e068873424c7ff89600b835c0496bec4 into 7be6ba45d3e44c6b99bfb5418209d9a3(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:35,973 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:35,973 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/B, priority=13, startTime=1733325755491; duration=0sec 2024-12-04T15:22:35,974 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:35,974 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:B 2024-12-04T15:22:35,974 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:22:35,976 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:22:35,976 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/C is initiating minor compaction (all files) 2024-12-04T15:22:35,976 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/C in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:35,976 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/f4df746e73054e699bf6f3dfd121b1e5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d52657b9096d4ecd853c5c3076763c7b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/aa82b2456f204f2bbfbf575d25ae0d90, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/c783de7dd83d4f958317dde70ca6ee16] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=48.7 K 2024-12-04T15:22:35,977 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting f4df746e73054e699bf6f3dfd121b1e5, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733325752686 2024-12-04T15:22:35,978 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting d52657b9096d4ecd853c5c3076763c7b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733325753320 2024-12-04T15:22:35,978 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting aa82b2456f204f2bbfbf575d25ae0d90, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733325753941 2024-12-04T15:22:35,979 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting c783de7dd83d4f958317dde70ca6ee16, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1733325754118 2024-12-04T15:22:35,979 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/269f46413f474f4291bc6e6fe2da5817 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/269f46413f474f4291bc6e6fe2da5817 2024-12-04T15:22:35,985 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/A of e068873424c7ff89600b835c0496bec4 into 269f46413f474f4291bc6e6fe2da5817(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:35,985 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:35,985 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/A, priority=13, startTime=1733325755491; duration=0sec 2024-12-04T15:22:35,985 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:35,985 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:A 2024-12-04T15:22:35,996 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#C#compaction#285 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:35,997 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/2b1a51a564f84286ac593507551da19e is 50, key is test_row_0/C:col10/1733325754146/Put/seqid=0 2024-12-04T15:22:36,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742158_1334 (size=13051) 2024-12-04T15:22:36,048 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/2b1a51a564f84286ac593507551da19e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2b1a51a564f84286ac593507551da19e 2024-12-04T15:22:36,054 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e068873424c7ff89600b835c0496bec4/C of e068873424c7ff89600b835c0496bec4 into 2b1a51a564f84286ac593507551da19e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:36,054 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:36,054 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/C, priority=12, startTime=1733325755491; duration=0sec 2024-12-04T15:22:36,054 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:36,054 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:C 2024-12-04T15:22:36,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:36,109 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-04T15:22:36,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:36,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:36,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:36,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:36,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:36,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:36,121 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/3fcbb056bf284631a21bfe7eef4dac6b is 50, key is test_row_0/A:col10/1733325756107/Put/seqid=0 2024-12-04T15:22:36,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742159_1335 (size=12301) 2024-12-04T15:22:36,131 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/3fcbb056bf284631a21bfe7eef4dac6b 2024-12-04T15:22:36,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/1c0c0ad797214b72adecfa75b2cb6f3b is 50, key is test_row_0/B:col10/1733325756107/Put/seqid=0 2024-12-04T15:22:36,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:36,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325816144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:36,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325816146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:36,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325816148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742160_1336 (size=12301) 2024-12-04T15:22:36,178 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/1c0c0ad797214b72adecfa75b2cb6f3b 2024-12-04T15:22:36,190 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/25f62f9e96144cd8968d2317d736def7 is 50, key is test_row_0/C:col10/1733325756107/Put/seqid=0 2024-12-04T15:22:36,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-04T15:22:36,193 INFO [Thread-1220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-04T15:22:36,195 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:22:36,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-04T15:22:36,197 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:22:36,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-04T15:22:36,198 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:22:36,199 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-12-04T15:22:36,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742161_1337 (size=12301) 2024-12-04T15:22:36,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:36,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325816250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:36,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325816250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:36,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325816260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:36,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325816296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:36,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325816297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-04T15:22:36,351 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-04T15:22:36,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:36,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:36,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:36,352 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:36,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:36,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:36,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:36,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325816453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:36,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325816453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:36,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325816463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-04T15:22:36,506 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,506 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-04T15:22:36,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:36,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:36,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:36,507 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:36,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:36,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:36,617 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/25f62f9e96144cd8968d2317d736def7 2024-12-04T15:22:36,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/3fcbb056bf284631a21bfe7eef4dac6b as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3fcbb056bf284631a21bfe7eef4dac6b 2024-12-04T15:22:36,628 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3fcbb056bf284631a21bfe7eef4dac6b, entries=150, sequenceid=345, filesize=12.0 K 2024-12-04T15:22:36,630 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/1c0c0ad797214b72adecfa75b2cb6f3b as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1c0c0ad797214b72adecfa75b2cb6f3b 2024-12-04T15:22:36,636 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1c0c0ad797214b72adecfa75b2cb6f3b, entries=150, sequenceid=345, filesize=12.0 K 2024-12-04T15:22:36,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/25f62f9e96144cd8968d2317d736def7 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/25f62f9e96144cd8968d2317d736def7 2024-12-04T15:22:36,642 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/25f62f9e96144cd8968d2317d736def7, entries=150, sequenceid=345, filesize=12.0 K 2024-12-04T15:22:36,643 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for e068873424c7ff89600b835c0496bec4 in 534ms, sequenceid=345, compaction requested=true 2024-12-04T15:22:36,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:36,644 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:36,645 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:36,645 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/A is initiating minor compaction (all files) 2024-12-04T15:22:36,645 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/A in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:36,646 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/269f46413f474f4291bc6e6fe2da5817, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/9ed14754d956440ba110ebf54f14e8e8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3fcbb056bf284631a21bfe7eef4dac6b] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=36.7 K 2024-12-04T15:22:36,646 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 269f46413f474f4291bc6e6fe2da5817, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733325753941 2024-12-04T15:22:36,646 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ed14754d956440ba110ebf54f14e8e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1733325754118 2024-12-04T15:22:36,647 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3fcbb056bf284631a21bfe7eef4dac6b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1733325756106 2024-12-04T15:22:36,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:36,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:36,648 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:36,650 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:36,650 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/B is initiating minor compaction (all files) 2024-12-04T15:22:36,650 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/B in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:36,650 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7be6ba45d3e44c6b99bfb5418209d9a3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1dec40a95be640ac86902523585a1ef3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1c0c0ad797214b72adecfa75b2cb6f3b] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=36.7 K 2024-12-04T15:22:36,652 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7be6ba45d3e44c6b99bfb5418209d9a3, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733325753941 2024-12-04T15:22:36,653 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1dec40a95be640ac86902523585a1ef3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1733325754118 2024-12-04T15:22:36,653 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c0c0ad797214b72adecfa75b2cb6f3b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1733325756106 2024-12-04T15:22:36,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:36,661 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-04T15:22:36,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:36,662 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-04T15:22:36,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:36,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:36,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:36,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:36,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:36,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:36,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:36,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:36,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:36,678 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#A#compaction#289 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:36,679 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/8139bdd96761415ab15f72beceda9523 is 50, key is test_row_0/A:col10/1733325756107/Put/seqid=0 2024-12-04T15:22:36,682 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#B#compaction#290 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:36,682 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/2d6bc1e48bfb48c19349d050a70a108e is 50, key is test_row_0/B:col10/1733325756107/Put/seqid=0 2024-12-04T15:22:36,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/c0a55abd0dde4bf9a42c610468bc5366 is 50, key is test_row_0/A:col10/1733325756143/Put/seqid=0 2024-12-04T15:22:36,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742162_1338 (size=13119) 2024-12-04T15:22:36,742 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/8139bdd96761415ab15f72beceda9523 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/8139bdd96761415ab15f72beceda9523 2024-12-04T15:22:36,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742163_1339 (size=13119) 2024-12-04T15:22:36,749 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/2d6bc1e48bfb48c19349d050a70a108e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/2d6bc1e48bfb48c19349d050a70a108e 2024-12-04T15:22:36,750 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/A of e068873424c7ff89600b835c0496bec4 into 8139bdd96761415ab15f72beceda9523(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:36,750 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:36,750 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/A, priority=13, startTime=1733325756644; duration=0sec 2024-12-04T15:22:36,750 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:36,750 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:A 2024-12-04T15:22:36,750 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-04T15:22:36,751 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-04T15:22:36,751 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-04T15:22:36,751 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. because compaction request was cancelled 2024-12-04T15:22:36,751 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:C 2024-12-04T15:22:36,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742164_1340 (size=12301) 2024-12-04T15:22:36,757 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/c0a55abd0dde4bf9a42c610468bc5366 2024-12-04T15:22:36,757 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/B of e068873424c7ff89600b835c0496bec4 into 2d6bc1e48bfb48c19349d050a70a108e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:36,757 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:36,757 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/B, priority=13, startTime=1733325756648; duration=0sec 2024-12-04T15:22:36,758 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:36,758 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:B 2024-12-04T15:22:36,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:36,761 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:36,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/947ee09864694768897adffd5f811fdd is 50, key is test_row_0/B:col10/1733325756143/Put/seqid=0 2024-12-04T15:22:36,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742165_1341 (size=12301) 2024-12-04T15:22:36,777 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/947ee09864694768897adffd5f811fdd 2024-12-04T15:22:36,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:36,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325816777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:36,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325816778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:36,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325816778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/b28228fed2274cc79237368d0e6b2142 is 50, key is test_row_0/C:col10/1733325756143/Put/seqid=0 2024-12-04T15:22:36,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-04T15:22:36,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742166_1342 (size=12301) 2024-12-04T15:22:36,828 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/b28228fed2274cc79237368d0e6b2142 2024-12-04T15:22:36,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/c0a55abd0dde4bf9a42c610468bc5366 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c0a55abd0dde4bf9a42c610468bc5366 2024-12-04T15:22:36,847 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c0a55abd0dde4bf9a42c610468bc5366, entries=150, sequenceid=369, filesize=12.0 K 2024-12-04T15:22:36,849 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/947ee09864694768897adffd5f811fdd as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/947ee09864694768897adffd5f811fdd 2024-12-04T15:22:36,855 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/947ee09864694768897adffd5f811fdd, entries=150, sequenceid=369, filesize=12.0 K 2024-12-04T15:22:36,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/b28228fed2274cc79237368d0e6b2142 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b28228fed2274cc79237368d0e6b2142 2024-12-04T15:22:36,864 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b28228fed2274cc79237368d0e6b2142, entries=150, sequenceid=369, filesize=12.0 K 2024-12-04T15:22:36,865 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for e068873424c7ff89600b835c0496bec4 in 203ms, sequenceid=369, compaction requested=true 2024-12-04T15:22:36,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:36,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:36,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-04T15:22:36,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-04T15:22:36,871 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-04T15:22:36,871 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 669 msec 2024-12-04T15:22:36,882 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 685 msec 2024-12-04T15:22:36,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:36,889 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-04T15:22:36,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:36,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:36,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:36,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:36,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:36,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:36,902 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/96dec61512e743c9a808640efa8a7723 is 50, key is test_row_0/A:col10/1733325756775/Put/seqid=0 2024-12-04T15:22:36,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742167_1343 (size=14741) 2024-12-04T15:22:36,931 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/96dec61512e743c9a808640efa8a7723 2024-12-04T15:22:36,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/4f78b9fad6bb4e4ea3ee1a999b01c039 is 50, key is test_row_0/B:col10/1733325756775/Put/seqid=0 2024-12-04T15:22:36,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742168_1344 
(size=12301) 2024-12-04T15:22:36,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:36,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325816969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:36,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325816974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:36,977 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:36,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325816974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:37,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:37,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325817075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:37,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:37,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325817079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:37,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:37,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325817080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:37,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:37,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325817285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:37,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:37,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325817288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:37,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:37,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325817288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:37,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-04T15:22:37,304 INFO [Thread-1220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-04T15:22:37,306 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:22:37,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-12-04T15:22:37,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-04T15:22:37,314 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:22:37,314 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:22:37,314 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:22:37,348 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/4f78b9fad6bb4e4ea3ee1a999b01c039 2024-12-04T15:22:37,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/c78dcd76dbf5464eafd6b6225b5cb891 is 50, key is test_row_0/C:col10/1733325756775/Put/seqid=0 2024-12-04T15:22:37,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742169_1345 (size=12301) 2024-12-04T15:22:37,400 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/c78dcd76dbf5464eafd6b6225b5cb891 2024-12-04T15:22:37,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-04T15:22:37,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/96dec61512e743c9a808640efa8a7723 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/96dec61512e743c9a808640efa8a7723 2024-12-04T15:22:37,425 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/96dec61512e743c9a808640efa8a7723, entries=200, sequenceid=387, filesize=14.4 K 2024-12-04T15:22:37,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/4f78b9fad6bb4e4ea3ee1a999b01c039 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/4f78b9fad6bb4e4ea3ee1a999b01c039 2024-12-04T15:22:37,432 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/4f78b9fad6bb4e4ea3ee1a999b01c039, entries=150, sequenceid=387, filesize=12.0 K 2024-12-04T15:22:37,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/c78dcd76dbf5464eafd6b6225b5cb891 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/c78dcd76dbf5464eafd6b6225b5cb891 2024-12-04T15:22:37,437 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/c78dcd76dbf5464eafd6b6225b5cb891, entries=150, sequenceid=387, filesize=12.0 K 2024-12-04T15:22:37,438 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 
e068873424c7ff89600b835c0496bec4 in 549ms, sequenceid=387, compaction requested=true 2024-12-04T15:22:37,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:37,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:37,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:37,438 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:37,439 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:37,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:37,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:37,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:37,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:37,440 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40161 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:37,440 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/A is initiating minor compaction (all files) 2024-12-04T15:22:37,440 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/A in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:37,440 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/8139bdd96761415ab15f72beceda9523, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c0a55abd0dde4bf9a42c610468bc5366, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/96dec61512e743c9a808640efa8a7723] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=39.2 K 2024-12-04T15:22:37,440 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:37,440 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/B is initiating minor compaction (all files) 2024-12-04T15:22:37,440 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/B in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:37,440 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/2d6bc1e48bfb48c19349d050a70a108e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/947ee09864694768897adffd5f811fdd, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/4f78b9fad6bb4e4ea3ee1a999b01c039] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=36.8 K 2024-12-04T15:22:37,441 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8139bdd96761415ab15f72beceda9523, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1733325756106 2024-12-04T15:22:37,441 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d6bc1e48bfb48c19349d050a70a108e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1733325756106 2024-12-04T15:22:37,441 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0a55abd0dde4bf9a42c610468bc5366, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1733325756143 2024-12-04T15:22:37,442 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 947ee09864694768897adffd5f811fdd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1733325756143 2024-12-04T15:22:37,442 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 96dec61512e743c9a808640efa8a7723, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733325756775 2024-12-04T15:22:37,442 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f78b9fad6bb4e4ea3ee1a999b01c039, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733325756775 2024-12-04T15:22:37,462 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#A#compaction#297 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:37,462 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/002b8f431e7240e4a77a556a124ea41c is 50, key is test_row_0/A:col10/1733325756775/Put/seqid=0 2024-12-04T15:22:37,467 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:37,467 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-04T15:22:37,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:37,468 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-04T15:22:37,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:37,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:37,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:37,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:37,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:37,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:37,477 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#B#compaction#298 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:37,477 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/c06f34291a054fa5a40b89f089b85f5e is 50, key is test_row_0/B:col10/1733325756775/Put/seqid=0 2024-12-04T15:22:37,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/3a51b5dc9b8444b2b0b8febbe96a7d96 is 50, key is test_row_0/A:col10/1733325756966/Put/seqid=0 2024-12-04T15:22:37,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742170_1346 (size=13221) 2024-12-04T15:22:37,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742171_1347 (size=13221) 2024-12-04T15:22:37,527 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/002b8f431e7240e4a77a556a124ea41c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/002b8f431e7240e4a77a556a124ea41c 2024-12-04T15:22:37,529 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/c06f34291a054fa5a40b89f089b85f5e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/c06f34291a054fa5a40b89f089b85f5e 2024-12-04T15:22:37,534 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/A of e068873424c7ff89600b835c0496bec4 into 002b8f431e7240e4a77a556a124ea41c(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:37,534 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:37,534 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/A, priority=13, startTime=1733325757438; duration=0sec 2024-12-04T15:22:37,534 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:37,534 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:A 2024-12-04T15:22:37,534 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:22:37,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742172_1348 (size=12301) 2024-12-04T15:22:37,538 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/3a51b5dc9b8444b2b0b8febbe96a7d96 2024-12-04T15:22:37,540 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:22:37,540 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/C is initiating minor compaction (all files) 2024-12-04T15:22:37,540 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/C in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:37,540 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2b1a51a564f84286ac593507551da19e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/25f62f9e96144cd8968d2317d736def7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b28228fed2274cc79237368d0e6b2142, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/c78dcd76dbf5464eafd6b6225b5cb891] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=48.8 K 2024-12-04T15:22:37,541 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/B of e068873424c7ff89600b835c0496bec4 into c06f34291a054fa5a40b89f089b85f5e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:37,541 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:37,541 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/B, priority=13, startTime=1733325757438; duration=0sec 2024-12-04T15:22:37,541 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:37,541 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:B 2024-12-04T15:22:37,542 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b1a51a564f84286ac593507551da19e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1733325754118 2024-12-04T15:22:37,542 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25f62f9e96144cd8968d2317d736def7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1733325756106 2024-12-04T15:22:37,542 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b28228fed2274cc79237368d0e6b2142, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1733325756143 2024-12-04T15:22:37,543 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting c78dcd76dbf5464eafd6b6225b5cb891, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733325756775 2024-12-04T15:22:37,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/c9c20f1c4896498fa2a98e79bd7739db is 50, key is test_row_0/B:col10/1733325756966/Put/seqid=0 2024-12-04T15:22:37,567 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#C#compaction#301 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:37,567 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/9130181dcc0240ecbe816dc7be45f96f is 50, key is test_row_0/C:col10/1733325756775/Put/seqid=0 2024-12-04T15:22:37,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:37,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:37,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742173_1349 (size=12301) 2024-12-04T15:22:37,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742174_1350 (size=13187) 2024-12-04T15:22:37,605 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/c9c20f1c4896498fa2a98e79bd7739db 2024-12-04T15:22:37,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:37,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325817606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:37,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:37,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325817607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:37,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:37,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325817609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:37,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/37a4c9109d3947c8bb1a74030bc01332 is 50, key is test_row_0/C:col10/1733325756966/Put/seqid=0 2024-12-04T15:22:37,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-04T15:22:37,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742175_1351 (size=12301) 2024-12-04T15:22:37,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:37,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325817711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:37,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:37,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325817712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:37,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:37,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325817712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:37,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:37,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325817916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:37,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:37,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325817916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:37,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:37,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325817917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:37,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-04T15:22:38,006 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/9130181dcc0240ecbe816dc7be45f96f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/9130181dcc0240ecbe816dc7be45f96f 2024-12-04T15:22:38,012 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e068873424c7ff89600b835c0496bec4/C of e068873424c7ff89600b835c0496bec4 into 9130181dcc0240ecbe816dc7be45f96f(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:38,012 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:38,012 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/C, priority=12, startTime=1733325757439; duration=0sec 2024-12-04T15:22:38,012 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:38,013 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:C 2024-12-04T15:22:38,076 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/37a4c9109d3947c8bb1a74030bc01332 2024-12-04T15:22:38,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/3a51b5dc9b8444b2b0b8febbe96a7d96 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3a51b5dc9b8444b2b0b8febbe96a7d96 2024-12-04T15:22:38,098 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3a51b5dc9b8444b2b0b8febbe96a7d96, entries=150, sequenceid=408, filesize=12.0 K 2024-12-04T15:22:38,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/c9c20f1c4896498fa2a98e79bd7739db as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/c9c20f1c4896498fa2a98e79bd7739db 2024-12-04T15:22:38,105 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/c9c20f1c4896498fa2a98e79bd7739db, entries=150, sequenceid=408, filesize=12.0 K 2024-12-04T15:22:38,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/37a4c9109d3947c8bb1a74030bc01332 
as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/37a4c9109d3947c8bb1a74030bc01332 2024-12-04T15:22:38,109 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/37a4c9109d3947c8bb1a74030bc01332, entries=150, sequenceid=408, filesize=12.0 K 2024-12-04T15:22:38,110 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for e068873424c7ff89600b835c0496bec4 in 642ms, sequenceid=408, compaction requested=false 2024-12-04T15:22:38,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:38,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:38,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-04T15:22:38,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-04T15:22:38,113 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-04T15:22:38,113 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 797 msec 2024-12-04T15:22:38,114 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 807 msec 2024-12-04T15:22:38,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:38,280 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-04T15:22:38,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:38,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:38,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:38,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:38,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:38,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:38,290 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/98d493f91d0d451eab79d21d59ba4466 is 50, key is test_row_0/A:col10/1733325758231/Put/seqid=0 2024-12-04T15:22:38,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742176_1352 (size=12301) 2024-12-04T15:22:38,306 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/98d493f91d0d451eab79d21d59ba4466 2024-12-04T15:22:38,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:38,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325818309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:38,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:38,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325818310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:38,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:38,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325818310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:38,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:38,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325818316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:38,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:38,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325818320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:38,323 DEBUG [Thread-1218 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4173 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., hostname=645c2dbfef2e,42169,1733325683856, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:22:38,324 DEBUG [Thread-1216 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4177 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., hostname=645c2dbfef2e,42169,1733325683856, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:22:38,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/184227a6843d441e9d00ed01148180fb is 50, key is test_row_0/B:col10/1733325758231/Put/seqid=0 2024-12-04T15:22:38,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742177_1353 (size=12301) 2024-12-04T15:22:38,350 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/184227a6843d441e9d00ed01148180fb 2024-12-04T15:22:38,362 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/7e093f25fc1749a884a00df98323fb4c is 50, key is test_row_0/C:col10/1733325758231/Put/seqid=0 2024-12-04T15:22:38,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742178_1354 (size=12301) 2024-12-04T15:22:38,387 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/7e093f25fc1749a884a00df98323fb4c 2024-12-04T15:22:38,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/98d493f91d0d451eab79d21d59ba4466 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/98d493f91d0d451eab79d21d59ba4466 2024-12-04T15:22:38,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/98d493f91d0d451eab79d21d59ba4466, entries=150, sequenceid=429, filesize=12.0 K 2024-12-04T15:22:38,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/184227a6843d441e9d00ed01148180fb as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/184227a6843d441e9d00ed01148180fb 2024-12-04T15:22:38,414 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/184227a6843d441e9d00ed01148180fb, entries=150, sequenceid=429, filesize=12.0 K 2024-12-04T15:22:38,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/7e093f25fc1749a884a00df98323fb4c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/7e093f25fc1749a884a00df98323fb4c 2024-12-04T15:22:38,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:38,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325818413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:38,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:38,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325818413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:38,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:38,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325818414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:38,420 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/7e093f25fc1749a884a00df98323fb4c, entries=150, sequenceid=429, filesize=12.0 K 2024-12-04T15:22:38,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-04T15:22:38,422 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for e068873424c7ff89600b835c0496bec4 in 142ms, sequenceid=429, compaction requested=true 2024-12-04T15:22:38,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:38,422 INFO [Thread-1220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-04T15:22:38,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:38,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:38,422 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:38,422 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:38,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:38,423 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:38,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:38,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:38,423 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:38,423 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/A is initiating minor compaction (all files) 2024-12-04T15:22:38,424 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:22:38,424 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/A in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:38,424 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/002b8f431e7240e4a77a556a124ea41c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3a51b5dc9b8444b2b0b8febbe96a7d96, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/98d493f91d0d451eab79d21d59ba4466] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=36.9 K 2024-12-04T15:22:38,424 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:38,424 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/B is initiating minor compaction (all files) 2024-12-04T15:22:38,424 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/B in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
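
Aside (not part of the log): the repeated RegionTooBusyException warnings above come from HRegion.checkResources() rejecting puts while the region's memstore is over its blocking threshold, which is normally derived from hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier (512.0 K in this run); the client first retries transparently (hbase.client.retries.number / hbase.client.pause), as the RpcRetryingCallerImpl frames show. The following is a minimal, illustrative Java sketch of how a writer might cope with that condition; the table, row and column names mirror the test, but the handling logic is an assumption, not the test's code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put);  // the client layer retries the RegionTooBusyException internally first
      } catch (IOException e) {
        // Once client retries are exhausted, the RegionTooBusyException thrown by
        // HRegion.checkResources() surfaces here (possibly wrapped). Backing off
        // briefly gives MemStoreFlusher time to drain the region before retrying.
        Thread.sleep(1000);
        table.put(put);
      }
    }
  }
}
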
2024-12-04T15:22:38,424 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/c06f34291a054fa5a40b89f089b85f5e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/c9c20f1c4896498fa2a98e79bd7739db, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/184227a6843d441e9d00ed01148180fb] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=36.9 K 2024-12-04T15:22:38,424 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 002b8f431e7240e4a77a556a124ea41c, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733325756775 2024-12-04T15:22:38,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-12-04T15:22:38,425 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting c06f34291a054fa5a40b89f089b85f5e, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733325756775 2024-12-04T15:22:38,425 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a51b5dc9b8444b2b0b8febbe96a7d96, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1733325756960 2024-12-04T15:22:38,426 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:22:38,427 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:22:38,427 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:22:38,427 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting c9c20f1c4896498fa2a98e79bd7739db, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1733325756960 2024-12-04T15:22:38,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-04T15:22:38,428 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98d493f91d0d451eab79d21d59ba4466, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=429, earliestPutTs=1733325757601 2024-12-04T15:22:38,428 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 184227a6843d441e9d00ed01148180fb, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=429, earliestPutTs=1733325757601 2024-12-04T15:22:38,445 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#B#compaction#306 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:38,446 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/f84b0e6a6e5c4a97b1248fc62cfd8c61 is 50, key is test_row_0/B:col10/1733325758231/Put/seqid=0 2024-12-04T15:22:38,458 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#A#compaction#307 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:38,458 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/1262cdbe2c9443bbb2006609ec8b0212 is 50, key is test_row_0/A:col10/1733325758231/Put/seqid=0 2024-12-04T15:22:38,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742179_1355 (size=13323) 2024-12-04T15:22:38,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742180_1356 (size=13323) 2024-12-04T15:22:38,486 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/1262cdbe2c9443bbb2006609ec8b0212 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/1262cdbe2c9443bbb2006609ec8b0212 2024-12-04T15:22:38,490 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/A of e068873424c7ff89600b835c0496bec4 into 1262cdbe2c9443bbb2006609ec8b0212(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
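
Aside (not part of the log): the flush requests recorded above ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees", FlushTableProcedure pid=83, and the repeated "Checking to see if procedure is done" polling) are driven through the HBase admin API. A minimal sketch of issuing the same kind of table flush is shown below; it is illustrative only and is not taken from the test harness.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      // Submits a FlushTableProcedure on the master and waits for it to finish;
      // the master-side "procedure is done" checks above are this call polling.
      admin.flush(tn);
      // admin.compact(tn);  // would queue the kind of minor compaction selected above
    }
  }
}
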
2024-12-04T15:22:38,490 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:38,491 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/A, priority=13, startTime=1733325758422; duration=0sec 2024-12-04T15:22:38,491 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:38,491 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:A 2024-12-04T15:22:38,491 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:38,492 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:38,492 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/C is initiating minor compaction (all files) 2024-12-04T15:22:38,492 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/C in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:38,492 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/9130181dcc0240ecbe816dc7be45f96f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/37a4c9109d3947c8bb1a74030bc01332, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/7e093f25fc1749a884a00df98323fb4c] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=36.9 K 2024-12-04T15:22:38,492 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9130181dcc0240ecbe816dc7be45f96f, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733325756775 2024-12-04T15:22:38,493 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37a4c9109d3947c8bb1a74030bc01332, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1733325756960 2024-12-04T15:22:38,493 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e093f25fc1749a884a00df98323fb4c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=429, earliestPutTs=1733325757601 2024-12-04T15:22:38,502 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#C#compaction#308 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:38,503 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/b5fb6e9d7ee747008b28b9105f5a7c2a is 50, key is test_row_0/C:col10/1733325758231/Put/seqid=0 2024-12-04T15:22:38,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742181_1357 (size=13289) 2024-12-04T15:22:38,524 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/b5fb6e9d7ee747008b28b9105f5a7c2a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b5fb6e9d7ee747008b28b9105f5a7c2a 2024-12-04T15:22:38,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-04T15:22:38,531 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/C of e068873424c7ff89600b835c0496bec4 into b5fb6e9d7ee747008b28b9105f5a7c2a(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:38,531 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:38,531 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/C, priority=13, startTime=1733325758423; duration=0sec 2024-12-04T15:22:38,531 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:38,531 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:C 2024-12-04T15:22:38,580 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:38,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-04T15:22:38,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
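
Aside (not part of the log): the region flushes three column families A, B and C with identical data sizes (33.54 KB each) because, judging from the AcidGuaranteesTestTool$AtomicityWriter frames in the stack traces above, each writer mutation carries cells for every family in a single Put and relies on HBase's row-level atomicity. The sketch below illustrates that write pattern; it is an assumption about the test's behavior, not its actual code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class AtomicMultiFamilyPut {
  static final byte[][] FAMILIES = {Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C")};

  static void writeRowAtomically(Connection conn, byte[] row, byte[] value) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(row);
      for (byte[] family : FAMILIES) {
        put.addColumn(family, Bytes.toBytes("col10"), value);  // same value in each family
      }
      table.put(put);  // applied atomically per row, so readers never see A/B/C out of sync
    }
  }
}
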
2024-12-04T15:22:38,584 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-04T15:22:38,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:38,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:38,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:38,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:38,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:38,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:38,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/37d3f3f126c3476991773e13fe43e6c8 is 50, key is test_row_0/A:col10/1733325758304/Put/seqid=0 2024-12-04T15:22:38,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742182_1358 (size=12301) 2024-12-04T15:22:38,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:38,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:38,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:38,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325818642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:38,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:38,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325818643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:38,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:38,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325818645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:38,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-04T15:22:38,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:38,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325818748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:38,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:38,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325818748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:38,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:38,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325818748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:38,869 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/f84b0e6a6e5c4a97b1248fc62cfd8c61 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/f84b0e6a6e5c4a97b1248fc62cfd8c61 2024-12-04T15:22:38,877 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/B of e068873424c7ff89600b835c0496bec4 into f84b0e6a6e5c4a97b1248fc62cfd8c61(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:38,877 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:38,877 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/B, priority=13, startTime=1733325758422; duration=0sec 2024-12-04T15:22:38,877 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:38,878 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:B 2024-12-04T15:22:38,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:38,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325818952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:38,955 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:38,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325818952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:38,955 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:38,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325818952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:39,004 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/37d3f3f126c3476991773e13fe43e6c8 2024-12-04T15:22:39,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/edb3a610c0ce4126be998667e3dc6e54 is 50, key is test_row_0/B:col10/1733325758304/Put/seqid=0 2024-12-04T15:22:39,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-04T15:22:39,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742183_1359 (size=12301) 2024-12-04T15:22:39,076 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/edb3a610c0ce4126be998667e3dc6e54 2024-12-04T15:22:39,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/cdb4c0732aaa41baa1e781f3ae47e326 is 50, key is test_row_0/C:col10/1733325758304/Put/seqid=0 2024-12-04T15:22:39,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742184_1360 (size=12301) 2024-12-04T15:22:39,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:39,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325819257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:39,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:39,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325819257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:39,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:39,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325819258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:39,513 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/cdb4c0732aaa41baa1e781f3ae47e326 2024-12-04T15:22:39,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-04T15:22:39,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/37d3f3f126c3476991773e13fe43e6c8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/37d3f3f126c3476991773e13fe43e6c8 2024-12-04T15:22:39,593 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/37d3f3f126c3476991773e13fe43e6c8, entries=150, sequenceid=449, filesize=12.0 K 2024-12-04T15:22:39,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/edb3a610c0ce4126be998667e3dc6e54 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/edb3a610c0ce4126be998667e3dc6e54 2024-12-04T15:22:39,615 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/edb3a610c0ce4126be998667e3dc6e54, entries=150, sequenceid=449, filesize=12.0 K 2024-12-04T15:22:39,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/cdb4c0732aaa41baa1e781f3ae47e326 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/cdb4c0732aaa41baa1e781f3ae47e326 2024-12-04T15:22:39,628 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/cdb4c0732aaa41baa1e781f3ae47e326, entries=150, sequenceid=449, filesize=12.0 K 2024-12-04T15:22:39,638 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for e068873424c7ff89600b835c0496bec4 in 1053ms, sequenceid=449, compaction requested=false 2024-12-04T15:22:39,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:39,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:39,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-12-04T15:22:39,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-12-04T15:22:39,643 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-04T15:22:39,643 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2150 sec 2024-12-04T15:22:39,645 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.2200 sec 2024-12-04T15:22:39,773 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-04T15:22:39,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:39,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:39,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:39,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:39,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:39,774 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:39,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:39,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/e5c5a822b93441499c827779a07b4a80 is 50, key is test_row_0/A:col10/1733325759773/Put/seqid=0 2024-12-04T15:22:39,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:39,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325819806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:39,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:39,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325819807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:39,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:39,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325819812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:39,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742185_1361 (size=12301) 2024-12-04T15:22:39,861 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/e5c5a822b93441499c827779a07b4a80 2024-12-04T15:22:39,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:39,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325819911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:39,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:39,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325819911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:39,925 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/610e42eb0f92456c9a3fad95b5542760 is 50, key is test_row_0/B:col10/1733325759773/Put/seqid=0 2024-12-04T15:22:39,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:39,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325819925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:39,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742186_1362 (size=12301) 2024-12-04T15:22:39,962 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/610e42eb0f92456c9a3fad95b5542760 2024-12-04T15:22:39,971 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/33944f70fbbb4b55ac7352ad0acd7ee9 is 50, key is test_row_0/C:col10/1733325759773/Put/seqid=0 2024-12-04T15:22:40,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742187_1363 (size=12301) 2024-12-04T15:22:40,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:40,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325820115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:40,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:40,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325820119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:40,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:40,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325820130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:40,418 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/33944f70fbbb4b55ac7352ad0acd7ee9 2024-12-04T15:22:40,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:40,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325820423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:40,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/e5c5a822b93441499c827779a07b4a80 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e5c5a822b93441499c827779a07b4a80 2024-12-04T15:22:40,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:40,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325820424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:40,431 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e5c5a822b93441499c827779a07b4a80, entries=150, sequenceid=471, filesize=12.0 K 2024-12-04T15:22:40,432 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/610e42eb0f92456c9a3fad95b5542760 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/610e42eb0f92456c9a3fad95b5542760 2024-12-04T15:22:40,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:40,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325820432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:40,436 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/610e42eb0f92456c9a3fad95b5542760, entries=150, sequenceid=471, filesize=12.0 K 2024-12-04T15:22:40,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/33944f70fbbb4b55ac7352ad0acd7ee9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/33944f70fbbb4b55ac7352ad0acd7ee9 2024-12-04T15:22:40,440 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/33944f70fbbb4b55ac7352ad0acd7ee9, entries=150, sequenceid=471, filesize=12.0 K 2024-12-04T15:22:40,441 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for e068873424c7ff89600b835c0496bec4 in 667ms, sequenceid=471, compaction requested=true 2024-12-04T15:22:40,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:40,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:40,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:40,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:40,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:40,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:C, priority=-2147483648, current under 
compaction store size is 3 2024-12-04T15:22:40,441 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:40,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:40,441 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:40,442 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:40,442 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:40,442 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/B is initiating minor compaction (all files) 2024-12-04T15:22:40,442 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/A is initiating minor compaction (all files) 2024-12-04T15:22:40,442 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/B in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:40,442 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/f84b0e6a6e5c4a97b1248fc62cfd8c61, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/edb3a610c0ce4126be998667e3dc6e54, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/610e42eb0f92456c9a3fad95b5542760] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=37.0 K 2024-12-04T15:22:40,442 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/A in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:40,443 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/1262cdbe2c9443bbb2006609ec8b0212, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/37d3f3f126c3476991773e13fe43e6c8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e5c5a822b93441499c827779a07b4a80] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=37.0 K 2024-12-04T15:22:40,443 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting f84b0e6a6e5c4a97b1248fc62cfd8c61, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=429, earliestPutTs=1733325757601 2024-12-04T15:22:40,443 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1262cdbe2c9443bbb2006609ec8b0212, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=429, earliestPutTs=1733325757601 2024-12-04T15:22:40,443 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting edb3a610c0ce4126be998667e3dc6e54, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1733325758304 2024-12-04T15:22:40,443 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37d3f3f126c3476991773e13fe43e6c8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1733325758304 2024-12-04T15:22:40,448 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 610e42eb0f92456c9a3fad95b5542760, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1733325759767 2024-12-04T15:22:40,448 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5c5a822b93441499c827779a07b4a80, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1733325759767 2024-12-04T15:22:40,470 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#A#compaction#315 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:40,471 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/b3b391859bf94d1e8a4db7f017e07382 is 50, key is test_row_0/A:col10/1733325759773/Put/seqid=0 2024-12-04T15:22:40,473 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#B#compaction#316 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:40,474 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/440ee2cb93fa43239b66d6bf378759b8 is 50, key is test_row_0/B:col10/1733325759773/Put/seqid=0 2024-12-04T15:22:40,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742188_1364 (size=13425) 2024-12-04T15:22:40,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742189_1365 (size=13425) 2024-12-04T15:22:40,486 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/b3b391859bf94d1e8a4db7f017e07382 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b3b391859bf94d1e8a4db7f017e07382 2024-12-04T15:22:40,502 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/440ee2cb93fa43239b66d6bf378759b8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/440ee2cb93fa43239b66d6bf378759b8 2024-12-04T15:22:40,516 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/A of e068873424c7ff89600b835c0496bec4 into b3b391859bf94d1e8a4db7f017e07382(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:40,516 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:40,516 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/A, priority=13, startTime=1733325760441; duration=0sec 2024-12-04T15:22:40,516 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:40,516 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:A 2024-12-04T15:22:40,516 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:40,520 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:40,520 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/C is initiating minor compaction (all files) 2024-12-04T15:22:40,520 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/C in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:40,520 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b5fb6e9d7ee747008b28b9105f5a7c2a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/cdb4c0732aaa41baa1e781f3ae47e326, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/33944f70fbbb4b55ac7352ad0acd7ee9] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=37.0 K 2024-12-04T15:22:40,521 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5fb6e9d7ee747008b28b9105f5a7c2a, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=429, earliestPutTs=1733325757601 2024-12-04T15:22:40,521 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting cdb4c0732aaa41baa1e781f3ae47e326, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1733325758304 2024-12-04T15:22:40,522 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33944f70fbbb4b55ac7352ad0acd7ee9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1733325759767 2024-12-04T15:22:40,524 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/B of e068873424c7ff89600b835c0496bec4 into 440ee2cb93fa43239b66d6bf378759b8(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:40,524 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:40,524 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/B, priority=13, startTime=1733325760441; duration=0sec 2024-12-04T15:22:40,524 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:40,524 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:B 2024-12-04T15:22:40,530 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#C#compaction#317 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:40,531 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/07769290b45c437e8a7e555256372eeb is 50, key is test_row_0/C:col10/1733325759773/Put/seqid=0 2024-12-04T15:22:40,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-04T15:22:40,542 INFO [Thread-1220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-04T15:22:40,543 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:22:40,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-12-04T15:22:40,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-04T15:22:40,546 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:22:40,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742190_1366 (size=13391) 2024-12-04T15:22:40,547 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:22:40,547 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:22:40,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-04T15:22:40,698 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:40,699 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-04T15:22:40,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:40,699 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-04T15:22:40,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:40,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:40,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:40,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:40,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:40,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:40,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/59f8cc43f0e04c878b703987973a4f65 is 50, key is test_row_0/A:col10/1733325759798/Put/seqid=0 2024-12-04T15:22:40,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742191_1367 (size=12301) 2024-12-04T15:22:40,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-04T15:22:40,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:40,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:40,955 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/07769290b45c437e8a7e555256372eeb as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/07769290b45c437e8a7e555256372eeb 2024-12-04T15:22:40,964 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/C of e068873424c7ff89600b835c0496bec4 into 07769290b45c437e8a7e555256372eeb(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:40,964 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:40,964 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/C, priority=13, startTime=1733325760441; duration=0sec 2024-12-04T15:22:40,964 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:40,964 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:C 2024-12-04T15:22:40,976 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:40,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325820973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:40,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:40,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325820975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:40,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:40,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325820976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:41,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:41,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325821078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:41,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:41,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325821080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:41,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:41,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325821080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:41,119 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/59f8cc43f0e04c878b703987973a4f65 2024-12-04T15:22:41,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/683960e3926f4c1f9b4aa52d4b526811 is 50, key is test_row_0/B:col10/1733325759798/Put/seqid=0 2024-12-04T15:22:41,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742192_1368 (size=12301) 2024-12-04T15:22:41,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-04T15:22:41,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:41,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325821280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:41,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:41,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325821282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:41,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:41,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325821283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:41,543 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/683960e3926f4c1f9b4aa52d4b526811 2024-12-04T15:22:41,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:41,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325821585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:41,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:41,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325821587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:41,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:41,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325821587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:41,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/2df1ff26828a4a2da581246559772604 is 50, key is test_row_0/C:col10/1733325759798/Put/seqid=0 2024-12-04T15:22:41,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742193_1369 (size=12301) 2024-12-04T15:22:41,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-04T15:22:42,016 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/2df1ff26828a4a2da581246559772604 2024-12-04T15:22:42,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/59f8cc43f0e04c878b703987973a4f65 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/59f8cc43f0e04c878b703987973a4f65 2024-12-04T15:22:42,036 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/59f8cc43f0e04c878b703987973a4f65, entries=150, sequenceid=488, filesize=12.0 K 2024-12-04T15:22:42,037 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/683960e3926f4c1f9b4aa52d4b526811 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/683960e3926f4c1f9b4aa52d4b526811 2024-12-04T15:22:42,041 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/683960e3926f4c1f9b4aa52d4b526811, entries=150, sequenceid=488, filesize=12.0 K 2024-12-04T15:22:42,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/2df1ff26828a4a2da581246559772604 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2df1ff26828a4a2da581246559772604 2024-12-04T15:22:42,048 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2df1ff26828a4a2da581246559772604, entries=150, sequenceid=488, filesize=12.0 K 2024-12-04T15:22:42,053 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for e068873424c7ff89600b835c0496bec4 in 1353ms, sequenceid=488, compaction requested=false 2024-12-04T15:22:42,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:42,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:42,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-12-04T15:22:42,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-12-04T15:22:42,055 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-04T15:22:42,056 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5070 sec 2024-12-04T15:22:42,057 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 1.5130 sec 2024-12-04T15:22:42,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:42,093 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-04T15:22:42,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:42,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:42,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:42,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:42,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:42,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:42,099 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/d3ffbbfd90ef42598cb0076b4389fc7e is 50, key is test_row_0/A:col10/1733325760974/Put/seqid=0 2024-12-04T15:22:42,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742194_1370 (size=12301) 2024-12-04T15:22:42,104 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=511 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/d3ffbbfd90ef42598cb0076b4389fc7e 2024-12-04T15:22:42,115 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/214a8136d9494168a1dec614a494febb is 50, key is test_row_0/B:col10/1733325760974/Put/seqid=0 2024-12-04T15:22:42,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore 
size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:42,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325822112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:42,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325822113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,117 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:42,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325822114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742195_1371 (size=12301) 2024-12-04T15:22:42,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:42,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325822218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:42,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325822218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:42,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325822220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:42,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41116 deadline: 1733325822340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,342 DEBUG [Thread-1218 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8192 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., hostname=645c2dbfef2e,42169,1733325683856, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:22:42,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:42,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41110 deadline: 1733325822344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,346 DEBUG [Thread-1216 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8199 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., hostname=645c2dbfef2e,42169,1733325683856, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:22:42,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:42,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325822420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:42,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325822420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:42,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325822424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,540 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=511 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/214a8136d9494168a1dec614a494febb 2024-12-04T15:22:42,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/d3c154d9434b4ca5b476a830f3902ccc is 50, key is test_row_0/C:col10/1733325760974/Put/seqid=0 2024-12-04T15:22:42,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742196_1372 (size=12301) 2024-12-04T15:22:42,606 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=511 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/d3c154d9434b4ca5b476a830f3902ccc 2024-12-04T15:22:42,612 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/d3ffbbfd90ef42598cb0076b4389fc7e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/d3ffbbfd90ef42598cb0076b4389fc7e 2024-12-04T15:22:42,616 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/d3ffbbfd90ef42598cb0076b4389fc7e, entries=150, sequenceid=511, filesize=12.0 K 2024-12-04T15:22:42,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/214a8136d9494168a1dec614a494febb as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/214a8136d9494168a1dec614a494febb 2024-12-04T15:22:42,622 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/214a8136d9494168a1dec614a494febb, entries=150, sequenceid=511, filesize=12.0 K 2024-12-04T15:22:42,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/d3c154d9434b4ca5b476a830f3902ccc as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d3c154d9434b4ca5b476a830f3902ccc 2024-12-04T15:22:42,629 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d3c154d9434b4ca5b476a830f3902ccc, entries=150, sequenceid=511, filesize=12.0 K 2024-12-04T15:22:42,630 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for e068873424c7ff89600b835c0496bec4 in 537ms, sequenceid=511, compaction requested=true 2024-12-04T15:22:42,630 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:42,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:42,630 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:42,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:42,630 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:42,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:42,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:42,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:42,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:42,631 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:42,631 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 
e068873424c7ff89600b835c0496bec4/A is initiating minor compaction (all files) 2024-12-04T15:22:42,631 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:42,631 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/A in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:42,631 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/B is initiating minor compaction (all files) 2024-12-04T15:22:42,632 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b3b391859bf94d1e8a4db7f017e07382, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/59f8cc43f0e04c878b703987973a4f65, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/d3ffbbfd90ef42598cb0076b4389fc7e] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=37.1 K 2024-12-04T15:22:42,632 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/B in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:42,632 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/440ee2cb93fa43239b66d6bf378759b8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/683960e3926f4c1f9b4aa52d4b526811, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/214a8136d9494168a1dec614a494febb] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=37.1 K 2024-12-04T15:22:42,632 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3b391859bf94d1e8a4db7f017e07382, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1733325759767 2024-12-04T15:22:42,632 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 440ee2cb93fa43239b66d6bf378759b8, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1733325759767 2024-12-04T15:22:42,633 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59f8cc43f0e04c878b703987973a4f65, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1733325759798 2024-12-04T15:22:42,633 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 683960e3926f4c1f9b4aa52d4b526811, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1733325759798 2024-12-04T15:22:42,634 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 214a8136d9494168a1dec614a494febb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1733325760972 2024-12-04T15:22:42,634 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3ffbbfd90ef42598cb0076b4389fc7e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1733325760972 2024-12-04T15:22:42,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-04T15:22:42,651 INFO [Thread-1220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-04T15:22:42,652 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:22:42,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-12-04T15:22:42,654 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:22:42,654 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-04T15:22:42,655 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:22:42,655 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:22:42,657 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#B#compaction#324 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:42,657 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/1deb1e7fd76449e3a1b85546b4e8e578 is 50, key is test_row_0/B:col10/1733325760974/Put/seqid=0 2024-12-04T15:22:42,660 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#A#compaction#325 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:42,660 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/b313edd8da6f4470949f9d96b0748974 is 50, key is test_row_0/A:col10/1733325760974/Put/seqid=0 2024-12-04T15:22:42,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742198_1374 (size=13527) 2024-12-04T15:22:42,707 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/b313edd8da6f4470949f9d96b0748974 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b313edd8da6f4470949f9d96b0748974 2024-12-04T15:22:42,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742197_1373 (size=13527) 2024-12-04T15:22:42,714 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/A of e068873424c7ff89600b835c0496bec4 into b313edd8da6f4470949f9d96b0748974(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:42,714 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:42,714 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/A, priority=13, startTime=1733325762630; duration=0sec 2024-12-04T15:22:42,714 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:42,714 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:A 2024-12-04T15:22:42,714 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:42,717 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:42,717 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/C is initiating minor compaction (all files) 2024-12-04T15:22:42,718 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/C in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:42,718 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/07769290b45c437e8a7e555256372eeb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2df1ff26828a4a2da581246559772604, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d3c154d9434b4ca5b476a830f3902ccc] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=37.1 K 2024-12-04T15:22:42,720 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07769290b45c437e8a7e555256372eeb, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1733325759767 2024-12-04T15:22:42,720 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/1deb1e7fd76449e3a1b85546b4e8e578 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1deb1e7fd76449e3a1b85546b4e8e578 2024-12-04T15:22:42,722 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2df1ff26828a4a2da581246559772604, 
keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1733325759798 2024-12-04T15:22:42,723 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3c154d9434b4ca5b476a830f3902ccc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1733325760972 2024-12-04T15:22:42,727 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/B of e068873424c7ff89600b835c0496bec4 into 1deb1e7fd76449e3a1b85546b4e8e578(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:42,727 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:42,727 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/B, priority=13, startTime=1733325762630; duration=0sec 2024-12-04T15:22:42,727 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:42,727 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:B 2024-12-04T15:22:42,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:42,729 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-04T15:22:42,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:42,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:42,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:42,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:42,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:42,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:42,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/85aae9ba601141f2a1e71710925de2a0 is 50, key is test_row_0/A:col10/1733325762112/Put/seqid=0 2024-12-04T15:22:42,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-04T15:22:42,770 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
e068873424c7ff89600b835c0496bec4#C#compaction#327 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:42,771 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/fa9300beb43b415a93b621b5eb6555d4 is 50, key is test_row_0/C:col10/1733325760974/Put/seqid=0 2024-12-04T15:22:42,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742199_1375 (size=14741) 2024-12-04T15:22:42,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=528 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/85aae9ba601141f2a1e71710925de2a0 2024-12-04T15:22:42,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742200_1376 (size=13493) 2024-12-04T15:22:42,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/7e23486f29314d6a8b82d5ca0f9783b2 is 50, key is test_row_0/B:col10/1733325762112/Put/seqid=0 2024-12-04T15:22:42,802 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/fa9300beb43b415a93b621b5eb6555d4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/fa9300beb43b415a93b621b5eb6555d4 2024-12-04T15:22:42,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:42,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325822796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:42,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325822798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,807 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,807 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-04T15:22:42,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:42,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:42,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:42,808 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:42,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:42,808 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/C of e068873424c7ff89600b835c0496bec4 into fa9300beb43b415a93b621b5eb6555d4(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:42,808 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:42,808 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/C, priority=13, startTime=1733325762631; duration=0sec 2024-12-04T15:22:42,809 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:42,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:42,809 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:C 2024-12-04T15:22:42,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:42,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325822812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742201_1377 (size=12301) 2024-12-04T15:22:42,839 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=528 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/7e23486f29314d6a8b82d5ca0f9783b2 2024-12-04T15:22:42,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/3bebd741a32b4c729fb1e7fed2c3f1b6 is 50, key is test_row_0/C:col10/1733325762112/Put/seqid=0 2024-12-04T15:22:42,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742202_1378 (size=12301) 2024-12-04T15:22:42,882 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=528 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/3bebd741a32b4c729fb1e7fed2c3f1b6 2024-12-04T15:22:42,888 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/85aae9ba601141f2a1e71710925de2a0 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/85aae9ba601141f2a1e71710925de2a0 2024-12-04T15:22:42,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/85aae9ba601141f2a1e71710925de2a0, entries=200, sequenceid=528, filesize=14.4 K 2024-12-04T15:22:42,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/7e23486f29314d6a8b82d5ca0f9783b2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7e23486f29314d6a8b82d5ca0f9783b2 2024-12-04T15:22:42,905 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7e23486f29314d6a8b82d5ca0f9783b2, entries=150, sequenceid=528, filesize=12.0 K 2024-12-04T15:22:42,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:42,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325822904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/3bebd741a32b4c729fb1e7fed2c3f1b6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/3bebd741a32b4c729fb1e7fed2c3f1b6 2024-12-04T15:22:42,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:42,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325822905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,911 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/3bebd741a32b4c729fb1e7fed2c3f1b6, entries=150, sequenceid=528, filesize=12.0 K 2024-12-04T15:22:42,911 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for e068873424c7ff89600b835c0496bec4 in 182ms, sequenceid=528, compaction requested=false 2024-12-04T15:22:42,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:42,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:42,919 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-04T15:22:42,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:42,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:42,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:42,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:42,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:42,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:42,927 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/6a00144e88654f3f8e78669f8d262b69 is 50, key is test_row_0/A:col10/1733325762794/Put/seqid=0 2024-12-04T15:22:42,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742203_1379 (size=12301) 2024-12-04T15:22:42,935 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=552 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/6a00144e88654f3f8e78669f8d262b69 2024-12-04T15:22:42,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/5a1c2a32726a423b8b2c117640da9928 is 50, key is test_row_0/B:col10/1733325762794/Put/seqid=0 2024-12-04T15:22:42,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:42,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325822947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742204_1380 (size=12301) 2024-12-04T15:22:42,954 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=552 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/5a1c2a32726a423b8b2c117640da9928 2024-12-04T15:22:42,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-04T15:22:42,959 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:42,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote 
procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-04T15:22:42,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:42,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:42,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:42,960 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:42,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
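[Editor's note] The RegionTooBusyException entries above show the region server rejecting Mutate calls once the region's memstore passes its blocking limit while a flush is still running. Purely as an illustration (this is not part of the test code, and with default client settings the exception may only surface to the caller after the HBase client's own internal retries are exhausted), a writer could handle it with a bounded retry loop. The table name, row key, and column names below are taken from the log; the retry budget and backoff are assumed values.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 0;
      while (true) {
        try {
          // May be rejected server-side while the memstore is over its blocking limit.
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (++attempts >= 5) throw e;   // assumed retry budget, illustrative only
          Thread.sleep(200L * attempts);  // assumed linear backoff, illustrative only
        }
      }
    }
  }
}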
2024-12-04T15:22:42,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:42,962 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/ea5c38795f57494bbb59407d8fb4cebd is 50, key is test_row_0/C:col10/1733325762794/Put/seqid=0 2024-12-04T15:22:42,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742205_1381 (size=12301) 2024-12-04T15:22:43,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:43,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325823050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:43,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325823107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:43,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325823109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,112 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,116 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-04T15:22:43,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:43,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:43,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:43,117 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:43,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:43,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:43,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-04T15:22:43,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:43,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325823260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,272 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,272 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-04T15:22:43,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:43,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:43,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:43,273 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
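[Editor's note] The recurring "Over memstore limit=512.0 K" figure is the per-region blocking threshold checked in HRegion.checkResources. In a stock deployment that threshold is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the unusually small 512 K seen here presumably comes from the test lowering the flush size. A minimal configuration sketch follows; the concrete values are assumptions chosen to reproduce a 512 K limit, not settings read from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB (assumed value for illustration).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block updates once the memstore reaches multiplier * flush.size, i.e. 512 KB here.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking limit: " + blockingLimit + " bytes"); // 524288 = 512 K
  }
}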
2024-12-04T15:22:43,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:43,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:43,375 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=552 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/ea5c38795f57494bbb59407d8fb4cebd 2024-12-04T15:22:43,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/6a00144e88654f3f8e78669f8d262b69 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/6a00144e88654f3f8e78669f8d262b69 2024-12-04T15:22:43,387 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/6a00144e88654f3f8e78669f8d262b69, entries=150, sequenceid=552, filesize=12.0 K 2024-12-04T15:22:43,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/5a1c2a32726a423b8b2c117640da9928 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/5a1c2a32726a423b8b2c117640da9928 2024-12-04T15:22:43,391 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/5a1c2a32726a423b8b2c117640da9928, entries=150, sequenceid=552, filesize=12.0 K 2024-12-04T15:22:43,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/ea5c38795f57494bbb59407d8fb4cebd as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/ea5c38795f57494bbb59407d8fb4cebd 2024-12-04T15:22:43,398 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/ea5c38795f57494bbb59407d8fb4cebd, entries=150, sequenceid=552, filesize=12.0 K 2024-12-04T15:22:43,399 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for e068873424c7ff89600b835c0496bec4 in 480ms, sequenceid=552, compaction requested=true 2024-12-04T15:22:43,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:43,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
e068873424c7ff89600b835c0496bec4:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:43,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:43,399 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:43,399 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:43,400 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40569 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:43,401 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/A is initiating minor compaction (all files) 2024-12-04T15:22:43,401 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/A in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:43,401 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b313edd8da6f4470949f9d96b0748974, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/85aae9ba601141f2a1e71710925de2a0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/6a00144e88654f3f8e78669f8d262b69] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=39.6 K 2024-12-04T15:22:43,401 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38129 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:43,401 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/B is initiating minor compaction (all files) 2024-12-04T15:22:43,401 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/B in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:43,401 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1deb1e7fd76449e3a1b85546b4e8e578, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7e23486f29314d6a8b82d5ca0f9783b2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/5a1c2a32726a423b8b2c117640da9928] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=37.2 K 2024-12-04T15:22:43,402 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b313edd8da6f4470949f9d96b0748974, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1733325760972 2024-12-04T15:22:43,402 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1deb1e7fd76449e3a1b85546b4e8e578, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1733325760972 2024-12-04T15:22:43,402 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85aae9ba601141f2a1e71710925de2a0, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=528, earliestPutTs=1733325762095 2024-12-04T15:22:43,402 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e23486f29314d6a8b82d5ca0f9783b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=528, earliestPutTs=1733325762095 2024-12-04T15:22:43,403 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a00144e88654f3f8e78669f8d262b69, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=552, earliestPutTs=1733325762790 2024-12-04T15:22:43,403 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a1c2a32726a423b8b2c117640da9928, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=552, earliestPutTs=1733325762790 2024-12-04T15:22:43,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:43,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:43,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:43,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:43,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:43,414 INFO 
[RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#A#compaction#333 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:43,414 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/536edaee019f4be2a8124aedea35f2ed is 50, key is test_row_0/A:col10/1733325762794/Put/seqid=0 2024-12-04T15:22:43,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-04T15:22:43,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:43,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:43,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:43,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:43,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:43,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:43,420 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#B#compaction#334 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:43,421 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/bd66fcfeb98041658604a60a264b713d is 50, key is test_row_0/B:col10/1733325762794/Put/seqid=0 2024-12-04T15:22:43,425 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/5ebdf763b1064d439c18c859029fbaaa is 50, key is test_row_0/A:col10/1733325763413/Put/seqid=0 2024-12-04T15:22:43,426 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,426 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-04T15:22:43,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:43,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:43,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:43,426 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:43,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:43,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
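[Editor's note] Procedure pid=88 is a master-driven region flush (FlushRegionCallable) that keeps failing with "Unable to complete flush ... as already flushing" and is redispatched by the master until the in-progress memstore flush finishes; the repeated "Remote procedure failed, pid=88" entries are those retries, not distinct errors. A flush like this is typically requested through the Admin API, presumably by the test's own flusher thread in this run; a hedged sketch of that call (table name taken from the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to run a flush procedure over the table's regions; if a region
      // is already flushing, the server-side callable fails and is retried, which is
      // what the repeated pid=88 attempts in the surrounding log correspond to.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}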
2024-12-04T15:22:43,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742207_1383 (size=13629) 2024-12-04T15:22:43,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742206_1382 (size=13629) 2024-12-04T15:22:43,472 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/bd66fcfeb98041658604a60a264b713d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/bd66fcfeb98041658604a60a264b713d 2024-12-04T15:22:43,478 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/536edaee019f4be2a8124aedea35f2ed as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/536edaee019f4be2a8124aedea35f2ed 2024-12-04T15:22:43,480 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/B of e068873424c7ff89600b835c0496bec4 into bd66fcfeb98041658604a60a264b713d(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:43,480 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:43,480 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/B, priority=13, startTime=1733325763399; duration=0sec 2024-12-04T15:22:43,480 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:43,480 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:B 2024-12-04T15:22:43,480 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:22:43,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:43,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325823477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,483 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:22:43,483 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/C is initiating minor compaction (all files) 2024-12-04T15:22:43,483 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/C in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:43,483 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/fa9300beb43b415a93b621b5eb6555d4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/3bebd741a32b4c729fb1e7fed2c3f1b6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/ea5c38795f57494bbb59407d8fb4cebd] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=37.2 K 2024-12-04T15:22:43,483 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting fa9300beb43b415a93b621b5eb6555d4, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1733325760972 2024-12-04T15:22:43,484 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bebd741a32b4c729fb1e7fed2c3f1b6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=528, earliestPutTs=1733325762095 2024-12-04T15:22:43,484 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/A of e068873424c7ff89600b835c0496bec4 into 536edaee019f4be2a8124aedea35f2ed(size=13.3 K), total size for store is 13.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:43,484 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:43,484 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/A, priority=13, startTime=1733325763399; duration=0sec 2024-12-04T15:22:43,485 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:43,485 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:A 2024-12-04T15:22:43,486 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting ea5c38795f57494bbb59407d8fb4cebd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=552, earliestPutTs=1733325762790 2024-12-04T15:22:43,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742208_1384 (size=12301) 2024-12-04T15:22:43,486 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=567 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/5ebdf763b1064d439c18c859029fbaaa 2024-12-04T15:22:43,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:43,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325823482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/dd2457c8bae04c38a95f95dedcb78037 is 50, key is test_row_0/B:col10/1733325763413/Put/seqid=0 2024-12-04T15:22:43,501 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#C#compaction#337 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:43,502 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/58c2f8dcbbe94ddfbb20d7fe8b170d32 is 50, key is test_row_0/C:col10/1733325762794/Put/seqid=0 2024-12-04T15:22:43,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742210_1386 (size=13595) 2024-12-04T15:22:43,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:43,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325823566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742209_1385 (size=12301) 2024-12-04T15:22:43,580 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=567 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/dd2457c8bae04c38a95f95dedcb78037 2024-12-04T15:22:43,582 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,583 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-04T15:22:43,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:43,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:43,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:43,583 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:43,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:43,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:43,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:43,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325823583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:43,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325823589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/4cb28dd3e3814fc6b4b087758d39fca2 is 50, key is test_row_0/C:col10/1733325763413/Put/seqid=0 2024-12-04T15:22:43,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742211_1387 (size=12301) 2024-12-04T15:22:43,635 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=567 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/4cb28dd3e3814fc6b4b087758d39fca2 2024-12-04T15:22:43,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/5ebdf763b1064d439c18c859029fbaaa as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/5ebdf763b1064d439c18c859029fbaaa 2024-12-04T15:22:43,644 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/5ebdf763b1064d439c18c859029fbaaa, entries=150, sequenceid=567, filesize=12.0 K 2024-12-04T15:22:43,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/dd2457c8bae04c38a95f95dedcb78037 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/dd2457c8bae04c38a95f95dedcb78037 2024-12-04T15:22:43,650 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/dd2457c8bae04c38a95f95dedcb78037, entries=150, sequenceid=567, filesize=12.0 K 2024-12-04T15:22:43,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/4cb28dd3e3814fc6b4b087758d39fca2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/4cb28dd3e3814fc6b4b087758d39fca2 2024-12-04T15:22:43,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/4cb28dd3e3814fc6b4b087758d39fca2, entries=150, sequenceid=567, filesize=12.0 K 2024-12-04T15:22:43,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for e068873424c7ff89600b835c0496bec4 in 241ms, sequenceid=567, compaction requested=false 2024-12-04T15:22:43,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:43,736 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,736 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-04T15:22:43,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:43,737 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-04T15:22:43,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:43,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:43,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:43,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:43,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:43,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:43,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/47fa153514a64595abf4ddc15e965a3c is 50, key is test_row_0/A:col10/1733325763475/Put/seqid=0 2024-12-04T15:22:43,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742212_1388 (size=12301) 2024-12-04T15:22:43,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-04T15:22:43,759 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=590 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/47fa153514a64595abf4ddc15e965a3c 2024-12-04T15:22:43,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/61393d101e8a4650a0e374547cdec590 is 50, key is test_row_0/B:col10/1733325763475/Put/seqid=0 2024-12-04T15:22:43,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
as already flushing 2024-12-04T15:22:43,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:43,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742213_1389 (size=12301) 2024-12-04T15:22:43,800 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=590 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/61393d101e8a4650a0e374547cdec590 2024-12-04T15:22:43,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/97ba582478fe434cac6e809677527ed1 is 50, key is test_row_0/C:col10/1733325763475/Put/seqid=0 2024-12-04T15:22:43,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:43,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325823817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:43,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 290 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325823821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742214_1390 (size=12301) 2024-12-04T15:22:43,834 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=590 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/97ba582478fe434cac6e809677527ed1 2024-12-04T15:22:43,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/47fa153514a64595abf4ddc15e965a3c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/47fa153514a64595abf4ddc15e965a3c 2024-12-04T15:22:43,845 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/47fa153514a64595abf4ddc15e965a3c, entries=150, sequenceid=590, filesize=12.0 K 2024-12-04T15:22:43,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/61393d101e8a4650a0e374547cdec590 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/61393d101e8a4650a0e374547cdec590 
2024-12-04T15:22:43,851 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/61393d101e8a4650a0e374547cdec590, entries=150, sequenceid=590, filesize=12.0 K 2024-12-04T15:22:43,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/97ba582478fe434cac6e809677527ed1 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/97ba582478fe434cac6e809677527ed1 2024-12-04T15:22:43,856 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/97ba582478fe434cac6e809677527ed1, entries=150, sequenceid=590, filesize=12.0 K 2024-12-04T15:22:43,857 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for e068873424c7ff89600b835c0496bec4 in 120ms, sequenceid=590, compaction requested=true 2024-12-04T15:22:43,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:43,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:43,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-04T15:22:43,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-04T15:22:43,860 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-04T15:22:43,860 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2030 sec 2024-12-04T15:22:43,861 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.2080 sec 2024-12-04T15:22:43,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:43,924 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-04T15:22:43,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:43,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:43,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:43,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:43,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:43,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:43,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/b202d138df25413f91223e3e55af3ab6 is 50, key is test_row_0/A:col10/1733325763923/Put/seqid=0 2024-12-04T15:22:43,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742215_1391 (size=12301) 2024-12-04T15:22:43,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:43,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 287 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325823960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,967 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/58c2f8dcbbe94ddfbb20d7fe8b170d32 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/58c2f8dcbbe94ddfbb20d7fe8b170d32 2024-12-04T15:22:43,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:43,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 301 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325823964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:43,972 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e068873424c7ff89600b835c0496bec4/C of e068873424c7ff89600b835c0496bec4 into 58c2f8dcbbe94ddfbb20d7fe8b170d32(size=13.3 K), total size for store is 37.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:43,972 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:43,972 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/C, priority=13, startTime=1733325763409; duration=0sec 2024-12-04T15:22:43,972 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:43,972 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:C 2024-12-04T15:22:44,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:44,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 289 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325824065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:44,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:44,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 303 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325824069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:44,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:44,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41080 deadline: 1733325824072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:44,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:44,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 291 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325824269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:44,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:44,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 305 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325824271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:44,350 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=606 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/b202d138df25413f91223e3e55af3ab6 2024-12-04T15:22:44,357 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/9159c91129674f6c9d056f33c553945f is 50, key is test_row_0/B:col10/1733325763923/Put/seqid=0 2024-12-04T15:22:44,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742216_1392 (size=12301) 2024-12-04T15:22:44,362 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=606 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/9159c91129674f6c9d056f33c553945f 2024-12-04T15:22:44,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/7203442a0388451cb030f6daa16121fa is 50, key is test_row_0/C:col10/1733325763923/Put/seqid=0 2024-12-04T15:22:44,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742217_1393 (size=12301) 2024-12-04T15:22:44,574 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:44,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 293 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1733325824573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:44,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:44,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 307 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41094 deadline: 1733325824575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:44,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-04T15:22:44,759 INFO [Thread-1220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-12-04T15:22:44,761 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:22:44,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-12-04T15:22:44,763 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:22:44,763 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:22:44,764 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:22:44,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-04T15:22:44,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=606 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/7203442a0388451cb030f6daa16121fa 2024-12-04T15:22:44,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/b202d138df25413f91223e3e55af3ab6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b202d138df25413f91223e3e55af3ab6 2024-12-04T15:22:44,782 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b202d138df25413f91223e3e55af3ab6, entries=150, sequenceid=606, filesize=12.0 K 2024-12-04T15:22:44,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/9159c91129674f6c9d056f33c553945f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/9159c91129674f6c9d056f33c553945f 2024-12-04T15:22:44,787 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/9159c91129674f6c9d056f33c553945f, entries=150, sequenceid=606, filesize=12.0 K 2024-12-04T15:22:44,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/7203442a0388451cb030f6daa16121fa as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/7203442a0388451cb030f6daa16121fa 2024-12-04T15:22:44,792 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/7203442a0388451cb030f6daa16121fa, entries=150, sequenceid=606, filesize=12.0 K 2024-12-04T15:22:44,793 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for e068873424c7ff89600b835c0496bec4 in 868ms, sequenceid=606, compaction requested=true 2024-12-04T15:22:44,793 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:44,793 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:22:44,794 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50532 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:22:44,794 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/A is initiating minor compaction (all files) 2024-12-04T15:22:44,794 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/A in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:44,794 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/536edaee019f4be2a8124aedea35f2ed, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/5ebdf763b1064d439c18c859029fbaaa, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/47fa153514a64595abf4ddc15e965a3c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b202d138df25413f91223e3e55af3ab6] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=49.3 K 2024-12-04T15:22:44,795 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 536edaee019f4be2a8124aedea35f2ed, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=552, earliestPutTs=1733325762790 2024-12-04T15:22:44,795 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5ebdf763b1064d439c18c859029fbaaa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=567, earliestPutTs=1733325762945 2024-12-04T15:22:44,795 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47fa153514a64595abf4ddc15e965a3c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=590, earliestPutTs=1733325763463 2024-12-04T15:22:44,796 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b202d138df25413f91223e3e55af3ab6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=606, earliestPutTs=1733325763810 2024-12-04T15:22:44,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:22:44,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:44,798 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:22:44,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:22:44,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:44,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e068873424c7ff89600b835c0496bec4:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:22:44,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:44,800 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50532 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:22:44,800 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/B is initiating minor compaction (all files) 2024-12-04T15:22:44,800 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/B in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:44,800 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/bd66fcfeb98041658604a60a264b713d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/dd2457c8bae04c38a95f95dedcb78037, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/61393d101e8a4650a0e374547cdec590, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/9159c91129674f6c9d056f33c553945f] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=49.3 K 2024-12-04T15:22:44,801 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting bd66fcfeb98041658604a60a264b713d, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=552, earliestPutTs=1733325762790 2024-12-04T15:22:44,801 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting dd2457c8bae04c38a95f95dedcb78037, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=567, earliestPutTs=1733325762945 2024-12-04T15:22:44,802 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 61393d101e8a4650a0e374547cdec590, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=590, earliestPutTs=1733325763463 2024-12-04T15:22:44,802 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 9159c91129674f6c9d056f33c553945f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=606, earliestPutTs=1733325763810 2024-12-04T15:22:44,816 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#A#compaction#345 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:44,817 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/bff6df7b73d84bcfbd8990d3da969f3c is 50, key is test_row_0/A:col10/1733325763923/Put/seqid=0 2024-12-04T15:22:44,822 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#B#compaction#346 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:44,823 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/7a61388154dd473e91e35f52f8e6ccd8 is 50, key is test_row_0/B:col10/1733325763923/Put/seqid=0 2024-12-04T15:22:44,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742218_1394 (size=13765) 2024-12-04T15:22:44,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742219_1395 (size=13765) 2024-12-04T15:22:44,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-04T15:22:44,919 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:44,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-04T15:22:44,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:44,920 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-04T15:22:44,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:44,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:44,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:44,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:44,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:44,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:44,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/ec95e325a7cb43608893f365f586a8f0 is 50, key is test_row_0/A:col10/1733325763959/Put/seqid=0 2024-12-04T15:22:44,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742220_1396 (size=12301) 2024-12-04T15:22:45,048 DEBUG [Thread-1221 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x072b9016 to 127.0.0.1:55739 2024-12-04T15:22:45,048 DEBUG [Thread-1225 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a7e7237 to 127.0.0.1:55739 2024-12-04T15:22:45,049 DEBUG [Thread-1225 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:45,049 DEBUG [Thread-1221 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:45,051 DEBUG [Thread-1227 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x07498874 to 127.0.0.1:55739 2024-12-04T15:22:45,051 DEBUG [Thread-1227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:45,051 DEBUG [Thread-1223 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x231730e5 to 127.0.0.1:55739 2024-12-04T15:22:45,051 DEBUG [Thread-1223 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:45,052 DEBUG [Thread-1229 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x74fdeaca to 127.0.0.1:55739 2024-12-04T15:22:45,052 DEBUG [Thread-1229 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:45,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-04T15:22:45,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
regionserver.HRegion(8581): Flush requested on e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:45,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. as already flushing 2024-12-04T15:22:45,077 DEBUG [Thread-1214 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38d7ecf1 to 127.0.0.1:55739 2024-12-04T15:22:45,077 DEBUG [Thread-1214 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:45,077 DEBUG [Thread-1212 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x76744267 to 127.0.0.1:55739 2024-12-04T15:22:45,078 DEBUG [Thread-1212 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:45,079 DEBUG [Thread-1210 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3052ca92 to 127.0.0.1:55739 2024-12-04T15:22:45,079 DEBUG [Thread-1210 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:45,264 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/7a61388154dd473e91e35f52f8e6ccd8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7a61388154dd473e91e35f52f8e6ccd8 2024-12-04T15:22:45,265 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/bff6df7b73d84bcfbd8990d3da969f3c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/bff6df7b73d84bcfbd8990d3da969f3c 2024-12-04T15:22:45,269 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e068873424c7ff89600b835c0496bec4/A of e068873424c7ff89600b835c0496bec4 into bff6df7b73d84bcfbd8990d3da969f3c(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:22:45,269 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:45,269 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/A, priority=12, startTime=1733325764793; duration=0sec 2024-12-04T15:22:45,269 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e068873424c7ff89600b835c0496bec4/B of e068873424c7ff89600b835c0496bec4 into 7a61388154dd473e91e35f52f8e6ccd8(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:45,269 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:45,269 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:22:45,269 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/B, priority=12, startTime=1733325764798; duration=0sec 2024-12-04T15:22:45,269 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:A 2024-12-04T15:22:45,269 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:22:45,269 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:45,269 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:B 2024-12-04T15:22:45,270 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50498 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:22:45,270 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): e068873424c7ff89600b835c0496bec4/C is initiating minor compaction (all files) 2024-12-04T15:22:45,270 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e068873424c7ff89600b835c0496bec4/C in TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:45,270 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/58c2f8dcbbe94ddfbb20d7fe8b170d32, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/4cb28dd3e3814fc6b4b087758d39fca2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/97ba582478fe434cac6e809677527ed1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/7203442a0388451cb030f6daa16121fa] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp, totalSize=49.3 K 2024-12-04T15:22:45,271 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58c2f8dcbbe94ddfbb20d7fe8b170d32, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=552, earliestPutTs=1733325762790 2024-12-04T15:22:45,271 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4cb28dd3e3814fc6b4b087758d39fca2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=567, earliestPutTs=1733325762945 2024-12-04T15:22:45,271 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97ba582478fe434cac6e809677527ed1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=590, earliestPutTs=1733325763463 2024-12-04T15:22:45,271 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7203442a0388451cb030f6daa16121fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=606, earliestPutTs=1733325763810 2024-12-04T15:22:45,279 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e068873424c7ff89600b835c0496bec4#C#compaction#348 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:22:45,280 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/0863ae0d0c9d417f89cce57a10953f27 is 50, key is test_row_0/C:col10/1733325763923/Put/seqid=0 2024-12-04T15:22:45,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742221_1397 (size=13731) 2024-12-04T15:22:45,340 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=627 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/ec95e325a7cb43608893f365f586a8f0 2024-12-04T15:22:45,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/a4ca79f36154491eb4db9cb5a05b6d5b is 50, key is test_row_0/B:col10/1733325763959/Put/seqid=0 2024-12-04T15:22:45,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742222_1398 (size=12301) 2024-12-04T15:22:45,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-04T15:22:45,688 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/0863ae0d0c9d417f89cce57a10953f27 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/0863ae0d0c9d417f89cce57a10953f27 2024-12-04T15:22:45,692 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e068873424c7ff89600b835c0496bec4/C of e068873424c7ff89600b835c0496bec4 into 0863ae0d0c9d417f89cce57a10953f27(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:22:45,692 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:45,692 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4., storeName=e068873424c7ff89600b835c0496bec4/C, priority=12, startTime=1733325764800; duration=0sec 2024-12-04T15:22:45,692 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:22:45,692 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e068873424c7ff89600b835c0496bec4:C 2024-12-04T15:22:45,754 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=627 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/a4ca79f36154491eb4db9cb5a05b6d5b 2024-12-04T15:22:45,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/20f555214b90400da9cb441fdb72f533 is 50, key is test_row_0/C:col10/1733325763959/Put/seqid=0 2024-12-04T15:22:45,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742223_1399 (size=12301) 2024-12-04T15:22:45,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-04T15:22:46,164 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=627 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/20f555214b90400da9cb441fdb72f533 2024-12-04T15:22:46,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/ec95e325a7cb43608893f365f586a8f0 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/ec95e325a7cb43608893f365f586a8f0 2024-12-04T15:22:46,172 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/ec95e325a7cb43608893f365f586a8f0, entries=150, sequenceid=627, filesize=12.0 K 2024-12-04T15:22:46,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 
{event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/a4ca79f36154491eb4db9cb5a05b6d5b as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/a4ca79f36154491eb4db9cb5a05b6d5b 2024-12-04T15:22:46,176 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/a4ca79f36154491eb4db9cb5a05b6d5b, entries=150, sequenceid=627, filesize=12.0 K 2024-12-04T15:22:46,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/20f555214b90400da9cb441fdb72f533 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/20f555214b90400da9cb441fdb72f533 2024-12-04T15:22:46,180 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/20f555214b90400da9cb441fdb72f533, entries=150, sequenceid=627, filesize=12.0 K 2024-12-04T15:22:46,181 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=20.13 KB/20610 for e068873424c7ff89600b835c0496bec4 in 1261ms, sequenceid=627, compaction requested=false 2024-12-04T15:22:46,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:46,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:46,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-04T15:22:46,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-04T15:22:46,183 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-04T15:22:46,183 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4180 sec 2024-12-04T15:22:46,184 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 1.4230 sec 2024-12-04T15:22:46,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-04T15:22:46,873 INFO [Thread-1220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-12-04T15:22:52,213 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-04T15:22:52,401 DEBUG [Thread-1216 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79badc6f to 127.0.0.1:55739 2024-12-04T15:22:52,402 DEBUG [Thread-1216 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:52,408 DEBUG [Thread-1218 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4a60caef to 127.0.0.1:55739 2024-12-04T15:22:52,408 DEBUG [Thread-1218 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:52,408 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-04T15:22:52,408 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 144 2024-12-04T15:22:52,409 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 113 2024-12-04T15:22:52,409 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 130 2024-12-04T15:22:52,409 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54 2024-12-04T15:22:52,409 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 59 2024-12-04T15:22:52,409 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-04T15:22:52,409 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4790 2024-12-04T15:22:52,409 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4708 2024-12-04T15:22:52,409 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4680 2024-12-04T15:22:52,409 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4773 2024-12-04T15:22:52,409 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4717 2024-12-04T15:22:52,409 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-04T15:22:52,409 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-04T15:22:52,409 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x474663e0 to 127.0.0.1:55739 2024-12-04T15:22:52,409 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:22:52,409 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-04T15:22:52,410 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-04T15:22:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-04T15:22:52,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-04T15:22:52,412 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325772412"}]},"ts":"1733325772412"} 2024-12-04T15:22:52,413 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-04T15:22:52,415 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-04T15:22:52,415 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-04T15:22:52,416 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e068873424c7ff89600b835c0496bec4, UNASSIGN}] 2024-12-04T15:22:52,417 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e068873424c7ff89600b835c0496bec4, UNASSIGN 2024-12-04T15:22:52,417 INFO [PEWorker-3 {}] 
assignment.RegionStateStore(202): pid=93 updating hbase:meta row=e068873424c7ff89600b835c0496bec4, regionState=CLOSING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:52,418 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-04T15:22:52,418 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; CloseRegionProcedure e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:22:52,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-04T15:22:52,569 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:52,569 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(124): Close e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:52,569 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-04T15:22:52,570 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1681): Closing e068873424c7ff89600b835c0496bec4, disabling compactions & flushes 2024-12-04T15:22:52,570 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:52,570 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:52,570 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. after waiting 0 ms 2024-12-04T15:22:52,570 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 
2024-12-04T15:22:52,570 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(2837): Flushing e068873424c7ff89600b835c0496bec4 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-04T15:22:52,570 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=A 2024-12-04T15:22:52,570 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:52,570 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=B 2024-12-04T15:22:52,570 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:52,570 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e068873424c7ff89600b835c0496bec4, store=C 2024-12-04T15:22:52,570 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:52,573 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/564324b9334b46a3914f32d6fe8717ce is 50, key is test_row_0/A:col10/1733325772407/Put/seqid=0 2024-12-04T15:22:52,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742224_1400 (size=12301) 2024-12-04T15:22:52,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-04T15:22:52,977 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=638 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/564324b9334b46a3914f32d6fe8717ce 2024-12-04T15:22:52,984 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/88c2d7490d4649328d5bb151d59ef697 is 50, key is test_row_0/B:col10/1733325772407/Put/seqid=0 2024-12-04T15:22:52,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742225_1401 (size=12301) 2024-12-04T15:22:53,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-04T15:22:53,388 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 
{event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=638 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/88c2d7490d4649328d5bb151d59ef697 2024-12-04T15:22:53,395 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/50be6bd79f854bb886cdfa22daa964c1 is 50, key is test_row_0/C:col10/1733325772407/Put/seqid=0 2024-12-04T15:22:53,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742226_1402 (size=12301) 2024-12-04T15:22:53,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-04T15:22:53,799 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=638 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/50be6bd79f854bb886cdfa22daa964c1 2024-12-04T15:22:53,803 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/A/564324b9334b46a3914f32d6fe8717ce as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/564324b9334b46a3914f32d6fe8717ce 2024-12-04T15:22:53,806 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/564324b9334b46a3914f32d6fe8717ce, entries=150, sequenceid=638, filesize=12.0 K 2024-12-04T15:22:53,807 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/B/88c2d7490d4649328d5bb151d59ef697 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/88c2d7490d4649328d5bb151d59ef697 2024-12-04T15:22:53,810 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/88c2d7490d4649328d5bb151d59ef697, entries=150, sequenceid=638, filesize=12.0 K 2024-12-04T15:22:53,811 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/.tmp/C/50be6bd79f854bb886cdfa22daa964c1 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/50be6bd79f854bb886cdfa22daa964c1 2024-12-04T15:22:53,814 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/50be6bd79f854bb886cdfa22daa964c1, entries=150, sequenceid=638, filesize=12.0 K 2024-12-04T15:22:53,815 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for e068873424c7ff89600b835c0496bec4 in 1244ms, sequenceid=638, compaction requested=true 2024-12-04T15:22:53,815 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/a1970203652c47279396439de167e009, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e23f20c1a6484c7b85f58d913c3d088a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/fd4695fb12424ccd95faed2402a332c4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b8cd24914a1b4ba2a6aa502baea7e858, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/f0f7cea1c54c40a3a1069acb28f0bfbc, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c2c9b90a98304636b1bd1ab15fb8ffe8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c3e8b544879a4ae5b2a7da4349bb93aa, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/55a202e27cf14bb1bda23e99439c5126, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/0e1e2f99f3854e27904b0e96c9cb0ab3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/02d4c81b682b44e882d69a7bcb93fc2c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e6d515fc4d8948599992e690b428b476, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c0b28fc2fd264d26978a490620219428, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/d0df8282bf7541f8a70d574e6077dfc9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/baef8709ce0148908f186e734a4584bb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3737203768824adda04e867470f38033, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/d525fd91820c4ae3af422a77a7ab2e6c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/4c18f0c4a6654f14a5086e59a7b5c419, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e447320dedd74704b3f06e4dc6561d5a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3ce0b08e531a459a8c2c316c2b18c90e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/16437fec2c104bb6b0768571fdb27c25, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/8c61e042cb6d443c8a5cbc950476e3bc, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/269f46413f474f4291bc6e6fe2da5817, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/9ed14754d956440ba110ebf54f14e8e8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/8139bdd96761415ab15f72beceda9523, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3fcbb056bf284631a21bfe7eef4dac6b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c0a55abd0dde4bf9a42c610468bc5366, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/96dec61512e743c9a808640efa8a7723, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/002b8f431e7240e4a77a556a124ea41c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3a51b5dc9b8444b2b0b8febbe96a7d96, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/1262cdbe2c9443bbb2006609ec8b0212, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/98d493f91d0d451eab79d21d59ba4466, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/37d3f3f126c3476991773e13fe43e6c8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b3b391859bf94d1e8a4db7f017e07382, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e5c5a822b93441499c827779a07b4a80, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/59f8cc43f0e04c878b703987973a4f65, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b313edd8da6f4470949f9d96b0748974, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/d3ffbbfd90ef42598cb0076b4389fc7e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/85aae9ba601141f2a1e71710925de2a0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/536edaee019f4be2a8124aedea35f2ed, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/6a00144e88654f3f8e78669f8d262b69, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/5ebdf763b1064d439c18c859029fbaaa, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/47fa153514a64595abf4ddc15e965a3c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b202d138df25413f91223e3e55af3ab6] to archive 2024-12-04T15:22:53,816 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
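[editorial note] The entries above record the store-close sequence for column family A of region e068873424c7ff89600b835c0496bec4: the flushed .tmp files are committed into the store directory, and the now-superseded compacted HFiles are moved under the region's mirrored archive directory rather than deleted outright. The short plain-Java sketch below only illustrates that move-to-archive pattern with the stock Hadoop FileSystem API; the class name ArchiveSketch, the helper archiveStoreFile, and the hard-coded path layout are assumptions for illustration and are not HBase's actual HFileArchiver implementation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;

// Illustrative sketch only (assumed names/paths): moves one replaced store file
// from the region's data directory to the matching archive directory, mimicking
// the "Archived from ... to ..." renames recorded in the DEBUG lines that follow.
public class ArchiveSketch {
  static Path archiveStoreFile(FileSystem fs, Path rootDir, String table, String region,
                               String family, String fileName) throws IOException {
    Path src = new Path(rootDir, "data/default/" + table + "/" + region + "/" + family + "/" + fileName);
    Path dst = new Path(rootDir, "archive/data/default/" + table + "/" + region + "/" + family + "/" + fileName);
    fs.mkdirs(dst.getParent());            // make sure the archive family directory exists
    if (!fs.rename(src, dst)) {            // a rename keeps the HFile readable from its archive location
      throw new IOException("Failed to archive " + src + " to " + dst);
    }
    return dst;
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path rootDir = new Path("hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c");
    archiveStoreFile(fs, rootDir, "TestAcidGuarantees", "e068873424c7ff89600b835c0496bec4",
        "A", "a1970203652c47279396439de167e009");
  }
}

Each DEBUG line that follows corresponds to one such data-path to archive-path move for a single store file. [end editorial note]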
2024-12-04T15:22:53,817 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/a1970203652c47279396439de167e009 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/a1970203652c47279396439de167e009 2024-12-04T15:22:53,819 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e23f20c1a6484c7b85f58d913c3d088a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e23f20c1a6484c7b85f58d913c3d088a 2024-12-04T15:22:53,820 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/fd4695fb12424ccd95faed2402a332c4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/fd4695fb12424ccd95faed2402a332c4 2024-12-04T15:22:53,820 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b8cd24914a1b4ba2a6aa502baea7e858 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b8cd24914a1b4ba2a6aa502baea7e858 2024-12-04T15:22:53,821 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/f0f7cea1c54c40a3a1069acb28f0bfbc to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/f0f7cea1c54c40a3a1069acb28f0bfbc 2024-12-04T15:22:53,822 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c2c9b90a98304636b1bd1ab15fb8ffe8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c2c9b90a98304636b1bd1ab15fb8ffe8 2024-12-04T15:22:53,823 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c3e8b544879a4ae5b2a7da4349bb93aa to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c3e8b544879a4ae5b2a7da4349bb93aa 2024-12-04T15:22:53,824 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/55a202e27cf14bb1bda23e99439c5126 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/55a202e27cf14bb1bda23e99439c5126 2024-12-04T15:22:53,825 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/0e1e2f99f3854e27904b0e96c9cb0ab3 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/0e1e2f99f3854e27904b0e96c9cb0ab3 2024-12-04T15:22:53,826 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/02d4c81b682b44e882d69a7bcb93fc2c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/02d4c81b682b44e882d69a7bcb93fc2c 2024-12-04T15:22:53,827 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e6d515fc4d8948599992e690b428b476 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e6d515fc4d8948599992e690b428b476 2024-12-04T15:22:53,827 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c0b28fc2fd264d26978a490620219428 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c0b28fc2fd264d26978a490620219428 2024-12-04T15:22:53,828 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/d0df8282bf7541f8a70d574e6077dfc9 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/d0df8282bf7541f8a70d574e6077dfc9 2024-12-04T15:22:53,829 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/baef8709ce0148908f186e734a4584bb to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/baef8709ce0148908f186e734a4584bb 2024-12-04T15:22:53,830 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3737203768824adda04e867470f38033 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3737203768824adda04e867470f38033 2024-12-04T15:22:53,832 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/d525fd91820c4ae3af422a77a7ab2e6c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/d525fd91820c4ae3af422a77a7ab2e6c 2024-12-04T15:22:53,833 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/4c18f0c4a6654f14a5086e59a7b5c419 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/4c18f0c4a6654f14a5086e59a7b5c419 2024-12-04T15:22:53,834 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e447320dedd74704b3f06e4dc6561d5a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e447320dedd74704b3f06e4dc6561d5a 2024-12-04T15:22:53,835 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3ce0b08e531a459a8c2c316c2b18c90e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3ce0b08e531a459a8c2c316c2b18c90e 2024-12-04T15:22:53,836 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/16437fec2c104bb6b0768571fdb27c25 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/16437fec2c104bb6b0768571fdb27c25 2024-12-04T15:22:53,837 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/8c61e042cb6d443c8a5cbc950476e3bc to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/8c61e042cb6d443c8a5cbc950476e3bc 2024-12-04T15:22:53,837 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/269f46413f474f4291bc6e6fe2da5817 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/269f46413f474f4291bc6e6fe2da5817 2024-12-04T15:22:53,838 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/9ed14754d956440ba110ebf54f14e8e8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/9ed14754d956440ba110ebf54f14e8e8 2024-12-04T15:22:53,839 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/8139bdd96761415ab15f72beceda9523 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/8139bdd96761415ab15f72beceda9523 2024-12-04T15:22:53,840 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3fcbb056bf284631a21bfe7eef4dac6b to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3fcbb056bf284631a21bfe7eef4dac6b 2024-12-04T15:22:53,841 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c0a55abd0dde4bf9a42c610468bc5366 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/c0a55abd0dde4bf9a42c610468bc5366 2024-12-04T15:22:53,842 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/96dec61512e743c9a808640efa8a7723 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/96dec61512e743c9a808640efa8a7723 2024-12-04T15:22:53,843 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/002b8f431e7240e4a77a556a124ea41c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/002b8f431e7240e4a77a556a124ea41c 2024-12-04T15:22:53,844 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3a51b5dc9b8444b2b0b8febbe96a7d96 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/3a51b5dc9b8444b2b0b8febbe96a7d96 2024-12-04T15:22:53,845 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/1262cdbe2c9443bbb2006609ec8b0212 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/1262cdbe2c9443bbb2006609ec8b0212 2024-12-04T15:22:53,846 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/98d493f91d0d451eab79d21d59ba4466 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/98d493f91d0d451eab79d21d59ba4466 2024-12-04T15:22:53,847 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/37d3f3f126c3476991773e13fe43e6c8 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/37d3f3f126c3476991773e13fe43e6c8 2024-12-04T15:22:53,848 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b3b391859bf94d1e8a4db7f017e07382 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b3b391859bf94d1e8a4db7f017e07382 2024-12-04T15:22:53,849 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e5c5a822b93441499c827779a07b4a80 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/e5c5a822b93441499c827779a07b4a80 2024-12-04T15:22:53,851 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/59f8cc43f0e04c878b703987973a4f65 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/59f8cc43f0e04c878b703987973a4f65 2024-12-04T15:22:53,852 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b313edd8da6f4470949f9d96b0748974 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b313edd8da6f4470949f9d96b0748974 2024-12-04T15:22:53,853 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/d3ffbbfd90ef42598cb0076b4389fc7e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/d3ffbbfd90ef42598cb0076b4389fc7e 2024-12-04T15:22:53,854 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/85aae9ba601141f2a1e71710925de2a0 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/85aae9ba601141f2a1e71710925de2a0 2024-12-04T15:22:53,855 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/536edaee019f4be2a8124aedea35f2ed to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/536edaee019f4be2a8124aedea35f2ed 2024-12-04T15:22:53,856 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/6a00144e88654f3f8e78669f8d262b69 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/6a00144e88654f3f8e78669f8d262b69 2024-12-04T15:22:53,857 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/5ebdf763b1064d439c18c859029fbaaa to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/5ebdf763b1064d439c18c859029fbaaa 2024-12-04T15:22:53,858 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/47fa153514a64595abf4ddc15e965a3c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/47fa153514a64595abf4ddc15e965a3c 2024-12-04T15:22:53,858 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b202d138df25413f91223e3e55af3ab6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/b202d138df25413f91223e3e55af3ab6 2024-12-04T15:22:53,860 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/083f9630898b4a9197bd572e7c6b7b18, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/f73480c197344a48a2770fd071c1c46b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/53646222ddbf4d79b3ca577cb5c32691, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/af1fa52b46ff4c1895a9096360020367, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/dfdfab13ddd64851a74aea28470e2750, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/97d470e6a13a49edb2d3e01a396fab22, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/5b8e8a92e4b34df2b5c8309121e37125, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/f0d4080c8bd3403881718742d80f6cd5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/b167aeacb9f3424abf247844cccb94e2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/d7085a756c0a4475a737e97cb98c3b3f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/74efb0f56232425ebe0216763370581e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/0270ea179f07423883bfd5cd0c672618, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1e8e48cd6ffa49b485ad37f00da0a673, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/bb0d666ba5534cad987b22d25809bed4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/2fb1a01bf8184e47a02f687703adeecc, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/c2e73d84b73b417ba79c51ebcb1447bc, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7c45b3a23c854735939973d8a1922d95, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/61ed864e38354ce9a2d0dfd7b8284e5a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/115ee226df3841f8ac1e37bd1cacefd8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7a04b2eb60814753be278caa6c708614, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7be6ba45d3e44c6b99bfb5418209d9a3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1a4e5487befd4035b98886cc2a7b5bab, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1dec40a95be640ac86902523585a1ef3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/2d6bc1e48bfb48c19349d050a70a108e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1c0c0ad797214b72adecfa75b2cb6f3b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/947ee09864694768897adffd5f811fdd, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/c06f34291a054fa5a40b89f089b85f5e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/4f78b9fad6bb4e4ea3ee1a999b01c039, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/c9c20f1c4896498fa2a98e79bd7739db, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/f84b0e6a6e5c4a97b1248fc62cfd8c61, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/184227a6843d441e9d00ed01148180fb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/edb3a610c0ce4126be998667e3dc6e54, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/440ee2cb93fa43239b66d6bf378759b8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/610e42eb0f92456c9a3fad95b5542760, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/683960e3926f4c1f9b4aa52d4b526811, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1deb1e7fd76449e3a1b85546b4e8e578, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/214a8136d9494168a1dec614a494febb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7e23486f29314d6a8b82d5ca0f9783b2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/bd66fcfeb98041658604a60a264b713d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/5a1c2a32726a423b8b2c117640da9928, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/dd2457c8bae04c38a95f95dedcb78037, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/61393d101e8a4650a0e374547cdec590, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/9159c91129674f6c9d056f33c553945f] to archive 2024-12-04T15:22:53,861 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-04T15:22:53,862 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/083f9630898b4a9197bd572e7c6b7b18 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/083f9630898b4a9197bd572e7c6b7b18 2024-12-04T15:22:53,863 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/f73480c197344a48a2770fd071c1c46b to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/f73480c197344a48a2770fd071c1c46b 2024-12-04T15:22:53,864 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/53646222ddbf4d79b3ca577cb5c32691 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/53646222ddbf4d79b3ca577cb5c32691 2024-12-04T15:22:53,865 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/af1fa52b46ff4c1895a9096360020367 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/af1fa52b46ff4c1895a9096360020367 2024-12-04T15:22:53,866 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/dfdfab13ddd64851a74aea28470e2750 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/dfdfab13ddd64851a74aea28470e2750 2024-12-04T15:22:53,867 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/97d470e6a13a49edb2d3e01a396fab22 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/97d470e6a13a49edb2d3e01a396fab22 2024-12-04T15:22:53,868 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/5b8e8a92e4b34df2b5c8309121e37125 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/5b8e8a92e4b34df2b5c8309121e37125 2024-12-04T15:22:53,869 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/f0d4080c8bd3403881718742d80f6cd5 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/f0d4080c8bd3403881718742d80f6cd5 2024-12-04T15:22:53,869 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/b167aeacb9f3424abf247844cccb94e2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/b167aeacb9f3424abf247844cccb94e2 2024-12-04T15:22:53,870 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/d7085a756c0a4475a737e97cb98c3b3f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/d7085a756c0a4475a737e97cb98c3b3f 2024-12-04T15:22:53,871 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/74efb0f56232425ebe0216763370581e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/74efb0f56232425ebe0216763370581e 2024-12-04T15:22:53,872 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/0270ea179f07423883bfd5cd0c672618 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/0270ea179f07423883bfd5cd0c672618 2024-12-04T15:22:53,873 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1e8e48cd6ffa49b485ad37f00da0a673 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1e8e48cd6ffa49b485ad37f00da0a673 2024-12-04T15:22:53,874 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/bb0d666ba5534cad987b22d25809bed4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/bb0d666ba5534cad987b22d25809bed4 2024-12-04T15:22:53,875 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/2fb1a01bf8184e47a02f687703adeecc to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/2fb1a01bf8184e47a02f687703adeecc 2024-12-04T15:22:53,875 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/c2e73d84b73b417ba79c51ebcb1447bc to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/c2e73d84b73b417ba79c51ebcb1447bc 2024-12-04T15:22:53,876 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7c45b3a23c854735939973d8a1922d95 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7c45b3a23c854735939973d8a1922d95 2024-12-04T15:22:53,877 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/61ed864e38354ce9a2d0dfd7b8284e5a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/61ed864e38354ce9a2d0dfd7b8284e5a 2024-12-04T15:22:53,878 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/115ee226df3841f8ac1e37bd1cacefd8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/115ee226df3841f8ac1e37bd1cacefd8 2024-12-04T15:22:53,879 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7a04b2eb60814753be278caa6c708614 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7a04b2eb60814753be278caa6c708614 2024-12-04T15:22:53,880 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7be6ba45d3e44c6b99bfb5418209d9a3 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7be6ba45d3e44c6b99bfb5418209d9a3 2024-12-04T15:22:53,881 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1a4e5487befd4035b98886cc2a7b5bab to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1a4e5487befd4035b98886cc2a7b5bab 2024-12-04T15:22:53,881 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1dec40a95be640ac86902523585a1ef3 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1dec40a95be640ac86902523585a1ef3 2024-12-04T15:22:53,882 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/2d6bc1e48bfb48c19349d050a70a108e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/2d6bc1e48bfb48c19349d050a70a108e 2024-12-04T15:22:53,883 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1c0c0ad797214b72adecfa75b2cb6f3b to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1c0c0ad797214b72adecfa75b2cb6f3b 2024-12-04T15:22:53,884 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/947ee09864694768897adffd5f811fdd to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/947ee09864694768897adffd5f811fdd 2024-12-04T15:22:53,885 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/c06f34291a054fa5a40b89f089b85f5e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/c06f34291a054fa5a40b89f089b85f5e 2024-12-04T15:22:53,885 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/4f78b9fad6bb4e4ea3ee1a999b01c039 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/4f78b9fad6bb4e4ea3ee1a999b01c039 2024-12-04T15:22:53,886 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/c9c20f1c4896498fa2a98e79bd7739db to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/c9c20f1c4896498fa2a98e79bd7739db 2024-12-04T15:22:53,887 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/f84b0e6a6e5c4a97b1248fc62cfd8c61 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/f84b0e6a6e5c4a97b1248fc62cfd8c61 2024-12-04T15:22:53,888 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/184227a6843d441e9d00ed01148180fb to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/184227a6843d441e9d00ed01148180fb 2024-12-04T15:22:53,889 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/edb3a610c0ce4126be998667e3dc6e54 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/edb3a610c0ce4126be998667e3dc6e54 2024-12-04T15:22:53,890 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/440ee2cb93fa43239b66d6bf378759b8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/440ee2cb93fa43239b66d6bf378759b8 2024-12-04T15:22:53,890 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/610e42eb0f92456c9a3fad95b5542760 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/610e42eb0f92456c9a3fad95b5542760 2024-12-04T15:22:53,891 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/683960e3926f4c1f9b4aa52d4b526811 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/683960e3926f4c1f9b4aa52d4b526811 2024-12-04T15:22:53,892 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1deb1e7fd76449e3a1b85546b4e8e578 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/1deb1e7fd76449e3a1b85546b4e8e578 2024-12-04T15:22:53,893 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/214a8136d9494168a1dec614a494febb to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/214a8136d9494168a1dec614a494febb 2024-12-04T15:22:53,894 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7e23486f29314d6a8b82d5ca0f9783b2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7e23486f29314d6a8b82d5ca0f9783b2 2024-12-04T15:22:53,894 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/bd66fcfeb98041658604a60a264b713d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/bd66fcfeb98041658604a60a264b713d 2024-12-04T15:22:53,895 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/5a1c2a32726a423b8b2c117640da9928 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/5a1c2a32726a423b8b2c117640da9928 2024-12-04T15:22:53,896 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/dd2457c8bae04c38a95f95dedcb78037 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/dd2457c8bae04c38a95f95dedcb78037 2024-12-04T15:22:53,897 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/61393d101e8a4650a0e374547cdec590 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/61393d101e8a4650a0e374547cdec590 2024-12-04T15:22:53,898 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/9159c91129674f6c9d056f33c553945f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/9159c91129674f6c9d056f33c553945f 2024-12-04T15:22:53,899 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/1d5ea073886547c9b56ec18696a379b6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b7094e4c1f104b4db540dcb7cc8d344c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d028aae7e55c4b4a9ac56e50d7398569, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d626e4e437ce4c65aefbcc92c063f3b8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/34eae6a915184651abe4c974a748cd68, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2730ceb39a3947ecbbad074a99dfdff1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/ab8c67397b1f410aafdcf80a5694e8cb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/ad742e9ecc0c4883959b88ea9fdc0957, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/eda26424d85b46e4b6f6334442a47f5b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/4c4d4dc4b4da4e01bf94078f334e0318, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2c119466f513469f98c7235117b8f873, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/12181b27d1a2475e811570bd9780d781, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/add500ba3243417d9cef7fb62d4d18fe, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/32d0e08da1794cc4b5d4a3d6f18dc7b5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/6567d044e0e14f89b80fe4df0fd71aa3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/6438228a917b4e0f9af30b7695dbe292, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/809f138c9112457ebc7c797598f8ed28, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/f4df746e73054e699bf6f3dfd121b1e5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b4e3bcffb6394dc288f1d47c399fdde7, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d52657b9096d4ecd853c5c3076763c7b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/aa82b2456f204f2bbfbf575d25ae0d90, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2b1a51a564f84286ac593507551da19e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/c783de7dd83d4f958317dde70ca6ee16, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/25f62f9e96144cd8968d2317d736def7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b28228fed2274cc79237368d0e6b2142, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/9130181dcc0240ecbe816dc7be45f96f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/c78dcd76dbf5464eafd6b6225b5cb891, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/37a4c9109d3947c8bb1a74030bc01332, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b5fb6e9d7ee747008b28b9105f5a7c2a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/7e093f25fc1749a884a00df98323fb4c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/cdb4c0732aaa41baa1e781f3ae47e326, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/07769290b45c437e8a7e555256372eeb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/33944f70fbbb4b55ac7352ad0acd7ee9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2df1ff26828a4a2da581246559772604, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/fa9300beb43b415a93b621b5eb6555d4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d3c154d9434b4ca5b476a830f3902ccc, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/3bebd741a32b4c729fb1e7fed2c3f1b6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/58c2f8dcbbe94ddfbb20d7fe8b170d32, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/ea5c38795f57494bbb59407d8fb4cebd, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/4cb28dd3e3814fc6b4b087758d39fca2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/97ba582478fe434cac6e809677527ed1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/7203442a0388451cb030f6daa16121fa] to archive 2024-12-04T15:22:53,900 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-04T15:22:53,901 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/1d5ea073886547c9b56ec18696a379b6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/1d5ea073886547c9b56ec18696a379b6 2024-12-04T15:22:53,902 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b7094e4c1f104b4db540dcb7cc8d344c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b7094e4c1f104b4db540dcb7cc8d344c 2024-12-04T15:22:53,903 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d028aae7e55c4b4a9ac56e50d7398569 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d028aae7e55c4b4a9ac56e50d7398569 2024-12-04T15:22:53,904 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d626e4e437ce4c65aefbcc92c063f3b8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d626e4e437ce4c65aefbcc92c063f3b8 2024-12-04T15:22:53,904 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/34eae6a915184651abe4c974a748cd68 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/34eae6a915184651abe4c974a748cd68 2024-12-04T15:22:53,905 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2730ceb39a3947ecbbad074a99dfdff1 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2730ceb39a3947ecbbad074a99dfdff1 2024-12-04T15:22:53,906 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/ab8c67397b1f410aafdcf80a5694e8cb to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/ab8c67397b1f410aafdcf80a5694e8cb 2024-12-04T15:22:53,907 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/ad742e9ecc0c4883959b88ea9fdc0957 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/ad742e9ecc0c4883959b88ea9fdc0957 2024-12-04T15:22:53,908 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/eda26424d85b46e4b6f6334442a47f5b to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/eda26424d85b46e4b6f6334442a47f5b 2024-12-04T15:22:53,908 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/4c4d4dc4b4da4e01bf94078f334e0318 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/4c4d4dc4b4da4e01bf94078f334e0318 2024-12-04T15:22:53,909 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2c119466f513469f98c7235117b8f873 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2c119466f513469f98c7235117b8f873 2024-12-04T15:22:53,910 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/12181b27d1a2475e811570bd9780d781 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/12181b27d1a2475e811570bd9780d781 2024-12-04T15:22:53,911 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/add500ba3243417d9cef7fb62d4d18fe to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/add500ba3243417d9cef7fb62d4d18fe 2024-12-04T15:22:53,911 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/32d0e08da1794cc4b5d4a3d6f18dc7b5 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/32d0e08da1794cc4b5d4a3d6f18dc7b5 2024-12-04T15:22:53,912 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/6567d044e0e14f89b80fe4df0fd71aa3 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/6567d044e0e14f89b80fe4df0fd71aa3 2024-12-04T15:22:53,913 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/6438228a917b4e0f9af30b7695dbe292 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/6438228a917b4e0f9af30b7695dbe292 2024-12-04T15:22:53,914 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/809f138c9112457ebc7c797598f8ed28 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/809f138c9112457ebc7c797598f8ed28 2024-12-04T15:22:53,915 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/f4df746e73054e699bf6f3dfd121b1e5 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/f4df746e73054e699bf6f3dfd121b1e5 2024-12-04T15:22:53,915 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b4e3bcffb6394dc288f1d47c399fdde7 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b4e3bcffb6394dc288f1d47c399fdde7 2024-12-04T15:22:53,916 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d52657b9096d4ecd853c5c3076763c7b to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d52657b9096d4ecd853c5c3076763c7b 2024-12-04T15:22:53,917 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/aa82b2456f204f2bbfbf575d25ae0d90 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/aa82b2456f204f2bbfbf575d25ae0d90 2024-12-04T15:22:53,918 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2b1a51a564f84286ac593507551da19e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2b1a51a564f84286ac593507551da19e 2024-12-04T15:22:53,919 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/c783de7dd83d4f958317dde70ca6ee16 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/c783de7dd83d4f958317dde70ca6ee16 2024-12-04T15:22:53,919 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/25f62f9e96144cd8968d2317d736def7 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/25f62f9e96144cd8968d2317d736def7 2024-12-04T15:22:53,920 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b28228fed2274cc79237368d0e6b2142 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b28228fed2274cc79237368d0e6b2142 2024-12-04T15:22:53,921 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/9130181dcc0240ecbe816dc7be45f96f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/9130181dcc0240ecbe816dc7be45f96f 2024-12-04T15:22:53,922 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/c78dcd76dbf5464eafd6b6225b5cb891 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/c78dcd76dbf5464eafd6b6225b5cb891 2024-12-04T15:22:53,923 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/37a4c9109d3947c8bb1a74030bc01332 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/37a4c9109d3947c8bb1a74030bc01332 2024-12-04T15:22:53,923 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b5fb6e9d7ee747008b28b9105f5a7c2a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/b5fb6e9d7ee747008b28b9105f5a7c2a 2024-12-04T15:22:53,924 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/7e093f25fc1749a884a00df98323fb4c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/7e093f25fc1749a884a00df98323fb4c 2024-12-04T15:22:53,925 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/cdb4c0732aaa41baa1e781f3ae47e326 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/cdb4c0732aaa41baa1e781f3ae47e326 2024-12-04T15:22:53,926 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/07769290b45c437e8a7e555256372eeb to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/07769290b45c437e8a7e555256372eeb 2024-12-04T15:22:53,927 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/33944f70fbbb4b55ac7352ad0acd7ee9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/33944f70fbbb4b55ac7352ad0acd7ee9 2024-12-04T15:22:53,927 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2df1ff26828a4a2da581246559772604 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/2df1ff26828a4a2da581246559772604 2024-12-04T15:22:53,928 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/fa9300beb43b415a93b621b5eb6555d4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/fa9300beb43b415a93b621b5eb6555d4 2024-12-04T15:22:53,929 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d3c154d9434b4ca5b476a830f3902ccc to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/d3c154d9434b4ca5b476a830f3902ccc 2024-12-04T15:22:53,930 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/3bebd741a32b4c729fb1e7fed2c3f1b6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/3bebd741a32b4c729fb1e7fed2c3f1b6 2024-12-04T15:22:53,930 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/58c2f8dcbbe94ddfbb20d7fe8b170d32 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/58c2f8dcbbe94ddfbb20d7fe8b170d32 2024-12-04T15:22:53,931 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/ea5c38795f57494bbb59407d8fb4cebd to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/ea5c38795f57494bbb59407d8fb4cebd 2024-12-04T15:22:53,932 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/4cb28dd3e3814fc6b4b087758d39fca2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/4cb28dd3e3814fc6b4b087758d39fca2 2024-12-04T15:22:53,933 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/97ba582478fe434cac6e809677527ed1 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/97ba582478fe434cac6e809677527ed1 2024-12-04T15:22:53,934 DEBUG [StoreCloser-TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/7203442a0388451cb030f6daa16121fa to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/7203442a0388451cb030f6daa16121fa 2024-12-04T15:22:53,937 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/recovered.edits/641.seqid, newMaxSeqId=641, maxSeqId=1 2024-12-04T15:22:53,938 INFO 
[RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4. 2024-12-04T15:22:53,938 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1635): Region close journal for e068873424c7ff89600b835c0496bec4: 2024-12-04T15:22:53,939 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(170): Closed e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:53,940 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=e068873424c7ff89600b835c0496bec4, regionState=CLOSED 2024-12-04T15:22:53,941 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-04T15:22:53,941 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseRegionProcedure e068873424c7ff89600b835c0496bec4, server=645c2dbfef2e,42169,1733325683856 in 1.5220 sec 2024-12-04T15:22:53,942 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-12-04T15:22:53,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e068873424c7ff89600b835c0496bec4, UNASSIGN in 1.5250 sec 2024-12-04T15:22:53,944 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-04T15:22:53,944 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5280 sec 2024-12-04T15:22:53,945 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325773945"}]},"ts":"1733325773945"} 2024-12-04T15:22:53,946 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-04T15:22:53,948 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-04T15:22:53,949 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5380 sec 2024-12-04T15:22:54,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-04T15:22:54,516 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-12-04T15:22:54,516 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-04T15:22:54,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:22:54,518 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:22:54,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-04T15:22:54,519 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:22:54,521 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:54,523 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/recovered.edits] 2024-12-04T15:22:54,525 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/564324b9334b46a3914f32d6fe8717ce to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/564324b9334b46a3914f32d6fe8717ce 2024-12-04T15:22:54,526 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/bff6df7b73d84bcfbd8990d3da969f3c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/bff6df7b73d84bcfbd8990d3da969f3c 2024-12-04T15:22:54,527 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/ec95e325a7cb43608893f365f586a8f0 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/A/ec95e325a7cb43608893f365f586a8f0 2024-12-04T15:22:54,529 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7a61388154dd473e91e35f52f8e6ccd8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/7a61388154dd473e91e35f52f8e6ccd8 2024-12-04T15:22:54,530 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/88c2d7490d4649328d5bb151d59ef697 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/88c2d7490d4649328d5bb151d59ef697 2024-12-04T15:22:54,531 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/a4ca79f36154491eb4db9cb5a05b6d5b to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/B/a4ca79f36154491eb4db9cb5a05b6d5b 2024-12-04T15:22:54,534 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/0863ae0d0c9d417f89cce57a10953f27 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/0863ae0d0c9d417f89cce57a10953f27 2024-12-04T15:22:54,535 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/20f555214b90400da9cb441fdb72f533 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/20f555214b90400da9cb441fdb72f533 2024-12-04T15:22:54,536 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/50be6bd79f854bb886cdfa22daa964c1 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/C/50be6bd79f854bb886cdfa22daa964c1 2024-12-04T15:22:54,538 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/recovered.edits/641.seqid to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4/recovered.edits/641.seqid 2024-12-04T15:22:54,539 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/e068873424c7ff89600b835c0496bec4 2024-12-04T15:22:54,539 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-04T15:22:54,541 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:22:54,545 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-04T15:22:54,547 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
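The archiving records above (from the StoreCloser threads during region close and from the HFileArchiver-3 thread driven by DeleteTableProcedure) all follow one pattern: a store file under .../data/default/TestAcidGuarantees/<region>/<family>/ is moved to the same relative location under .../archive/data/default/.... The following is a minimal, illustrative sketch of that data-to-archive path remapping using the Hadoop FileSystem API. It is not HBase's HFileArchiver implementation, and the ROOT and REGION path segments, class name, and variable names are placeholders rather than the actual directories from this run.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: move every file of one column family from the table's data
// directory to the mirrored path under the archive directory, the same
// data -> archive mapping the log records above show.
public class ArchiveFamilySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical roots matching the layout seen in the log; ROOT/REGION are placeholders.
        Path dataRoot = new Path("hdfs://localhost:38975/user/jenkins/test-data/ROOT/data");
        Path archiveRoot = new Path("hdfs://localhost:38975/user/jenkins/test-data/ROOT/archive/data");
        Path familyDir = new Path(dataRoot, "default/TestAcidGuarantees/REGION/B");

        FileSystem fs = FileSystem.get(new URI("hdfs://localhost:38975"), conf);
        for (FileStatus file : fs.listStatus(familyDir)) {
            // Rebuild the same relative path under the archive root.
            String relative = file.getPath().toUri().getPath()
                .substring(dataRoot.toUri().getPath().length() + 1);
            Path target = new Path(archiveRoot, relative);
            fs.mkdirs(target.getParent());                 // ensure archive/<...>/<family> exists
            boolean moved = fs.rename(file.getPath(), target);
            System.out.println("Archived " + file.getPath() + " -> " + target + " : " + moved);
        }
        fs.close();
    }
}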
2024-12-04T15:22:54,548 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:22:54,548 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-04T15:22:54,548 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733325774548"}]},"ts":"9223372036854775807"} 2024-12-04T15:22:54,549 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-04T15:22:54,549 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => e068873424c7ff89600b835c0496bec4, NAME => 'TestAcidGuarantees,,1733325742879.e068873424c7ff89600b835c0496bec4.', STARTKEY => '', ENDKEY => ''}] 2024-12-04T15:22:54,549 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-04T15:22:54,550 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733325774549"}]},"ts":"9223372036854775807"} 2024-12-04T15:22:54,551 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-04T15:22:54,553 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:22:54,554 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 37 msec 2024-12-04T15:22:54,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-04T15:22:54,620 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 95 completed 2024-12-04T15:22:54,630 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=237 (was 240), OpenFileDescriptor=449 (was 458), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=776 (was 915), ProcessCount=11 (was 11), AvailableMemoryMB=4157 (was 3279) - AvailableMemoryMB LEAK? - 2024-12-04T15:22:54,639 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=237, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=776, ProcessCount=11, AvailableMemoryMB=4158 2024-12-04T15:22:54,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
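The DISABLE (procId 91) and DELETE (procId 95) operations recorded above are issued by the test client and executed as master procedures; the repeated "Checking to see if procedure is done" records are the client polling until each procedure finishes. A minimal client-side sketch of the same disable-then-delete sequence with the standard HBase 2.x Admin API follows. Only the table name is taken from the log; the connection setup is assumed to come from an hbase-site.xml on the classpath, and the class name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            if (admin.tableExists(table)) {
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table);   // runs a DisableTableProcedure on the master (pid=91 here)
                }
                admin.deleteTable(table);        // runs a DeleteTableProcedure on the master (pid=95 here)
            }
        }
    }
}

A table must be disabled before it can be deleted, which is why the log shows the DisableTableProcedure completing before the DeleteTableProcedure is stored.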
2024-12-04T15:22:54,641 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T15:22:54,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-04T15:22:54,643 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T15:22:54,643 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:54,643 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 96 2024-12-04T15:22:54,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-04T15:22:54,644 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T15:22:54,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742227_1403 (size=963) 2024-12-04T15:22:54,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-04T15:22:54,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-04T15:22:55,051 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c 2024-12-04T15:22:55,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742228_1404 (size=53) 2024-12-04T15:22:55,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-04T15:22:55,457 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:22:55,457 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 96aa8a9c538d7176a93d416eb9d9bfac, disabling compactions & flushes 2024-12-04T15:22:55,457 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:55,457 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:55,457 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. after waiting 0 ms 2024-12-04T15:22:55,457 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:55,457 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
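The CREATE request above declares three column families (A, B, C) with VERSIONS => '1' and the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', which is what later makes each store open a CompactingMemStore. Below is a hedged sketch of building an equivalent descriptor with the HBase 2.x client API; the table name, family names, max versions, and the ADAPTIVE attribute come from the log, while the class name and connection setup are illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableDescriptorBuilder table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table-level attribute seen in the log; switches stores to a compacting memstore.
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
            for (String family : new String[] {"A", "B", "C"}) {
                table.setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)            // VERSIONS => '1' in the request above
                    .build());
            }
            admin.createTable(table.build());     // runs as a CreateTableProcedure (pid=96 here)
        }
    }
}

In recent HBase 2.x releases the same behaviour can also be requested per family via ColumnFamilyDescriptorBuilder.setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE), if that setter is available in the version in use.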
2024-12-04T15:22:55,457 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:22:55,458 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T15:22:55,458 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733325775458"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733325775458"}]},"ts":"1733325775458"} 2024-12-04T15:22:55,459 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-04T15:22:55,460 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T15:22:55,460 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325775460"}]},"ts":"1733325775460"} 2024-12-04T15:22:55,461 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-04T15:22:55,464 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=96aa8a9c538d7176a93d416eb9d9bfac, ASSIGN}] 2024-12-04T15:22:55,465 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=96aa8a9c538d7176a93d416eb9d9bfac, ASSIGN 2024-12-04T15:22:55,466 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=96aa8a9c538d7176a93d416eb9d9bfac, ASSIGN; state=OFFLINE, location=645c2dbfef2e,42169,1733325683856; forceNewPlan=false, retain=false 2024-12-04T15:22:55,616 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=96aa8a9c538d7176a93d416eb9d9bfac, regionState=OPENING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:55,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; OpenRegionProcedure 96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:22:55,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-04T15:22:55,769 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:55,773 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
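The records at this point show the assignment machinery for the new region: the master writes regionState=OPENING with a target server (645c2dbfef2e,42169,...) to hbase:meta, queues a TransitRegionStateProcedure/OpenRegionProcedure pair, and the region server's handler then opens the region (continued in the store-opening records that follow). From the client side the resulting placement can be read back through the RegionLocator API; the sketch below is illustrative only, with the table name taken from the log and the connection setup assumed as in the earlier examples.

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionPlacementSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
            // Each location pairs a region (encoded name, start/end key) with its hosting server,
            // i.e. the regionState/regionLocation information the master recorded in hbase:meta above.
            List<HRegionLocation> locations = locator.getAllRegionLocations();
            for (HRegionLocation loc : locations) {
                System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
            }
        }
    }
}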
2024-12-04T15:22:55,773 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7285): Opening region: {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:22:55,774 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:55,774 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:22:55,774 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7327): checking encryption for 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:55,774 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7330): checking classloading for 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:55,775 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:55,777 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:22:55,777 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 96aa8a9c538d7176a93d416eb9d9bfac columnFamilyName A 2024-12-04T15:22:55,777 DEBUG [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:55,778 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.HStore(327): Store=96aa8a9c538d7176a93d416eb9d9bfac/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:22:55,778 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:55,779 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:22:55,779 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 96aa8a9c538d7176a93d416eb9d9bfac columnFamilyName B 2024-12-04T15:22:55,779 DEBUG [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:55,780 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.HStore(327): Store=96aa8a9c538d7176a93d416eb9d9bfac/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:22:55,780 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:55,780 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:22:55,781 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 96aa8a9c538d7176a93d416eb9d9bfac columnFamilyName C 2024-12-04T15:22:55,781 DEBUG [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:55,781 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.HStore(327): Store=96aa8a9c538d7176a93d416eb9d9bfac/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:22:55,781 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:55,782 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:55,782 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:55,783 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T15:22:55,784 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1085): writing seq id for 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:55,786 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T15:22:55,786 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1102): Opened 96aa8a9c538d7176a93d416eb9d9bfac; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71874528, jitterRate=0.0710139274597168}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T15:22:55,786 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1001): Region open journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:22:55,787 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., pid=98, masterSystemTime=1733325775769 2024-12-04T15:22:55,788 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:55,789 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
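The FlushLargeStoresPolicy message above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the TestAcidGuarantees descriptor, so the region falls back to memstore-flush-size divided by the number of families. A sketch of how that table-level key could be set explicitly, assuming an Admin handle; the 8 MB value is an illustrative assumption, not something taken from this log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import java.io.IOException;

public final class SetPerFamilyFlushBound {
  // Hypothetical helper: sets the per-family flush lower bound reported as unset above.
  // 8 MB is an assumed example value, not a value observed in this run.
  static void apply(Admin admin) throws IOException {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(admin.getDescriptor(name))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(8L * 1024 * 1024));
    admin.modifyTable(builder.build());
  }
}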
2024-12-04T15:22:55,789 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=96aa8a9c538d7176a93d416eb9d9bfac, regionState=OPEN, openSeqNum=2, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:55,791 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-12-04T15:22:55,791 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; OpenRegionProcedure 96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 in 172 msec 2024-12-04T15:22:55,793 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-12-04T15:22:55,793 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=96aa8a9c538d7176a93d416eb9d9bfac, ASSIGN in 327 msec 2024-12-04T15:22:55,793 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T15:22:55,793 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325775793"}]},"ts":"1733325775793"} 2024-12-04T15:22:55,794 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-04T15:22:55,796 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T15:22:55,799 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1550 sec 2024-12-04T15:22:56,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-04T15:22:56,748 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 96 completed 2024-12-04T15:22:56,750 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2914d173 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3de0ad51 2024-12-04T15:22:56,757 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f5b8ac9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:56,761 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:56,762 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46786, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:56,766 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T15:22:56,768 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46536, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T15:22:56,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-04T15:22:56,775 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T15:22:56,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-04T15:22:56,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742229_1405 (size=999) 2024-12-04T15:22:57,197 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-04T15:22:57,197 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-04T15:22:57,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-04T15:22:57,203 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=96aa8a9c538d7176a93d416eb9d9bfac, REOPEN/MOVE}] 2024-12-04T15:22:57,204 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=96aa8a9c538d7176a93d416eb9d9bfac, REOPEN/MOVE 2024-12-04T15:22:57,205 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=96aa8a9c538d7176a93d416eb9d9bfac, regionState=CLOSING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:57,206 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-04T15:22:57,206 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; CloseRegionProcedure 96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:22:57,357 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:57,358 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(124): Close 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:57,358 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-04T15:22:57,358 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1681): Closing 96aa8a9c538d7176a93d416eb9d9bfac, disabling compactions & flushes 2024-12-04T15:22:57,358 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:57,358 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:57,358 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. after waiting 0 ms 2024-12-04T15:22:57,358 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
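The modify request logged above (pid=99) changes family A from a plain family to a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4') and then reopens the region via ReopenTableRegionsProcedure. A sketch, assuming an Admin handle, of the descriptor change that produces such a transition:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;

public final class EnableMobOnFamilyA {
  // Hypothetical helper: marks family A as a MOB family with a 4-byte threshold,
  // matching IS_MOB => 'true', MOB_THRESHOLD => '4' in the modify request above.
  static void enableMob(Admin admin) throws IOException {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(name);
    TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(
            ColumnFamilyDescriptorBuilder
                .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                .setMobEnabled(true)
                .setMobThreshold(4L)
                .build())
        .build();
    admin.modifyTable(modified); // drives the ModifyTableProcedure and region reopen (pids 99-103)
  }
}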
2024-12-04T15:22:57,363 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-04T15:22:57,363 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:57,364 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1635): Region close journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:22:57,364 WARN [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegionServer(3786): Not adding moved region record: 96aa8a9c538d7176a93d416eb9d9bfac to self. 2024-12-04T15:22:57,365 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(170): Closed 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:57,365 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=96aa8a9c538d7176a93d416eb9d9bfac, regionState=CLOSED 2024-12-04T15:22:57,367 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-12-04T15:22:57,368 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; CloseRegionProcedure 96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 in 160 msec 2024-12-04T15:22:57,368 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=96aa8a9c538d7176a93d416eb9d9bfac, REOPEN/MOVE; state=CLOSED, location=645c2dbfef2e,42169,1733325683856; forceNewPlan=false, retain=true 2024-12-04T15:22:57,519 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=96aa8a9c538d7176a93d416eb9d9bfac, regionState=OPENING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:57,520 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=101, state=RUNNABLE; OpenRegionProcedure 96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:22:57,672 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:57,676 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:22:57,676 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7285): Opening region: {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:22:57,677 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:57,677 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:22:57,677 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7327): checking encryption for 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:57,677 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7330): checking classloading for 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:57,678 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:57,679 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:22:57,679 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 96aa8a9c538d7176a93d416eb9d9bfac columnFamilyName A 2024-12-04T15:22:57,681 DEBUG [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:57,681 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.HStore(327): Store=96aa8a9c538d7176a93d416eb9d9bfac/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:22:57,682 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:57,683 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:22:57,683 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 96aa8a9c538d7176a93d416eb9d9bfac columnFamilyName B 2024-12-04T15:22:57,683 DEBUG [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:57,684 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.HStore(327): Store=96aa8a9c538d7176a93d416eb9d9bfac/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:22:57,684 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:57,684 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:22:57,685 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 96aa8a9c538d7176a93d416eb9d9bfac columnFamilyName C 2024-12-04T15:22:57,685 DEBUG [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:57,685 INFO [StoreOpener-96aa8a9c538d7176a93d416eb9d9bfac-1 {}] regionserver.HStore(327): Store=96aa8a9c538d7176a93d416eb9d9bfac/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:22:57,685 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:57,688 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:57,693 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:57,695 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T15:22:57,696 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1085): writing seq id for 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:57,700 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1102): Opened 96aa8a9c538d7176a93d416eb9d9bfac; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68839756, jitterRate=0.025792300701141357}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T15:22:57,701 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1001): Region open journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:22:57,702 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., pid=103, masterSystemTime=1733325777672 2024-12-04T15:22:57,703 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:57,704 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
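After the reopen completes, the test opens a series of client connections; the ReadOnlyZKClient lines below all connect to 127.0.0.1:55739 with a 90000 ms session timeout. A self-contained sketch of a client Connection configured against that endpoint (the split into quorum host and client port is the usual client configuration form, not something printed verbatim in the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import java.io.IOException;

public final class OpenTestConnection {
  // Opens a client Connection against the mini-cluster ZooKeeper at 127.0.0.1:55739,
  // the endpoint the ReadOnlyZKClient "Connect 0x..." lines below use.
  static Connection open() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "55739");
    conf.setInt("zookeeper.session.timeout", 90000); // matches the session timeout logged below
    return ConnectionFactory.createConnection(conf);
  }
}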
2024-12-04T15:22:57,704 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=96aa8a9c538d7176a93d416eb9d9bfac, regionState=OPEN, openSeqNum=5, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:57,706 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=101 2024-12-04T15:22:57,706 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=101, state=SUCCESS; OpenRegionProcedure 96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 in 185 msec 2024-12-04T15:22:57,707 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-12-04T15:22:57,708 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=96aa8a9c538d7176a93d416eb9d9bfac, REOPEN/MOVE in 503 msec 2024-12-04T15:22:57,709 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-12-04T15:22:57,709 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 507 msec 2024-12-04T15:22:57,712 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 934 msec 2024-12-04T15:22:57,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-04T15:22:57,714 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0733701c to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f01df8e 2024-12-04T15:22:57,726 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cbf0765, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:57,727 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x051f8b5d to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3453c9df 2024-12-04T15:22:57,734 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4023641f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:57,734 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x25ec2d5b to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@f5b0508 2024-12-04T15:22:57,746 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54838b60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:57,747 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c4acc48 to 
127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b6ee38c 2024-12-04T15:22:57,751 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53fea30a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:57,753 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x047ef789 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68e01609 2024-12-04T15:22:57,762 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e200eec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:57,763 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x299dc25b to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6784feea 2024-12-04T15:22:57,769 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70fdc389, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:57,770 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x585edb09 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@186d0d46 2024-12-04T15:22:57,773 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6926449, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:57,774 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x056c82ce to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@751ba818 2024-12-04T15:22:57,782 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@603e2ade, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:57,783 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e7f0457 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@260c26e8 2024-12-04T15:22:57,790 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37f5802d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:57,791 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a7db91c to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4d81e5dd 2024-12-04T15:22:57,796 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56eee0a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:22:57,806 DEBUG [hconnection-0x1d076514-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:57,808 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46796, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:57,808 DEBUG [hconnection-0x5ad12f6c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:57,809 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46802, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:57,819 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:22:57,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-12-04T15:22:57,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-04T15:22:57,822 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:22:57,823 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:22:57,824 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:22:57,824 DEBUG [hconnection-0x471ddeb6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:57,825 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46810, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:57,828 DEBUG [hconnection-0x304a246f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:57,829 DEBUG [hconnection-0x73bc41b3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:57,830 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46824, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:57,830 DEBUG [hconnection-0x15d8a6c5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:57,831 DEBUG [hconnection-0x2927f88c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:57,832 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46844, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:57,836 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46854, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:57,839 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46838, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:57,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-04T15:22:57,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:22:57,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:57,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:22:57,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:57,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:22:57,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:57,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:57,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:57,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325837887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:57,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:57,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325837887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:57,889 DEBUG [hconnection-0x6a704d65-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:57,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:57,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325837889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:57,890 DEBUG [hconnection-0x410ec2d4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:57,891 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46860, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:57,891 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46862, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:57,895 DEBUG [hconnection-0x65e264c6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:22:57,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:57,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325837896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:57,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:57,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46862 deadline: 1733325837902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:57,902 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46864, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:22:57,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-04T15:22:57,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204c01262badb4d475aa08a8e206dc022ad_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325777837/Put/seqid=0 2024-12-04T15:22:57,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742230_1406 (size=12154) 2024-12-04T15:22:57,963 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:57,967 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204c01262badb4d475aa08a8e206dc022ad_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204c01262badb4d475aa08a8e206dc022ad_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:57,967 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/36817bd8bc2543fdb4557a56a7a58e7d, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:22:57,968 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/36817bd8bc2543fdb4557a56a7a58e7d is 175, key is test_row_0/A:col10/1733325777837/Put/seqid=0 2024-12-04T15:22:57,978 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:57,978 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:22:57,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:57,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:22:57,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:57,978 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:57,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:57,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:57,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742231_1407 (size=30955) 2024-12-04T15:22:57,985 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/36817bd8bc2543fdb4557a56a7a58e7d 2024-12-04T15:22:57,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:57,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325837989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:57,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:57,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325837989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:57,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:57,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325837990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325837998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46862 deadline: 1733325838003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,012 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/d7eeab28245041998d07f546bfe86365 is 50, key is test_row_0/B:col10/1733325777837/Put/seqid=0 2024-12-04T15:22:58,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742232_1408 (size=12001) 2024-12-04T15:22:58,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-04T15:22:58,131 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:22:58,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:58,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:22:58,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:58,132 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:58,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:58,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:58,194 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325838191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325838191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325838193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325838202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46862 deadline: 1733325838204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,284 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:22:58,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:58,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:22:58,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:58,285 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:58,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:58,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:58,420 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/d7eeab28245041998d07f546bfe86365 2024-12-04T15:22:58,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-04T15:22:58,438 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,438 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:22:58,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:58,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:22:58,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:22:58,438 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:58,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:58,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:58,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/52358fa9c3244cce987ad0040dc9d26b is 50, key is test_row_0/C:col10/1733325777837/Put/seqid=0 2024-12-04T15:22:58,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742233_1409 (size=12001) 2024-12-04T15:22:58,453 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/52358fa9c3244cce987ad0040dc9d26b 2024-12-04T15:22:58,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/36817bd8bc2543fdb4557a56a7a58e7d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/36817bd8bc2543fdb4557a56a7a58e7d 2024-12-04T15:22:58,462 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/36817bd8bc2543fdb4557a56a7a58e7d, entries=150, sequenceid=16, filesize=30.2 K 2024-12-04T15:22:58,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/d7eeab28245041998d07f546bfe86365 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/d7eeab28245041998d07f546bfe86365 2024-12-04T15:22:58,466 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/d7eeab28245041998d07f546bfe86365, entries=150, sequenceid=16, filesize=11.7 K 2024-12-04T15:22:58,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/52358fa9c3244cce987ad0040dc9d26b as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/52358fa9c3244cce987ad0040dc9d26b 2024-12-04T15:22:58,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/52358fa9c3244cce987ad0040dc9d26b, entries=150, sequenceid=16, filesize=11.7 K 2024-12-04T15:22:58,472 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 96aa8a9c538d7176a93d416eb9d9bfac in 626ms, sequenceid=16, compaction requested=false 2024-12-04T15:22:58,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:22:58,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:58,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-04T15:22:58,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:22:58,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:58,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:22:58,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:58,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:22:58,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:58,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46862 deadline: 1733325838507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204394d6fe5a1f7479c88aac4055d1df28b_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325777886/Put/seqid=0 2024-12-04T15:22:58,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325838507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325838507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325838509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325838510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742234_1410 (size=14594) 2024-12-04T15:22:58,519 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:58,525 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204394d6fe5a1f7479c88aac4055d1df28b_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204394d6fe5a1f7479c88aac4055d1df28b_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:58,526 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/3ccf86a2938d463b9899542dbbe7d366, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:22:58,526 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/3ccf86a2938d463b9899542dbbe7d366 is 175, key is test_row_0/A:col10/1733325777886/Put/seqid=0 2024-12-04T15:22:58,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742235_1411 (size=39549) 2024-12-04T15:22:58,590 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,591 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:22:58,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:22:58,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:22:58,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:58,591 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:58,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:58,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:58,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325838612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325838613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325838616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325838616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,743 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,744 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:22:58,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:58,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:22:58,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:58,744 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:58,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:58,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:58,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325838818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325838819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325838820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:58,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325838821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,897 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:58,898 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:22:58,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:58,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:22:58,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:58,898 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:58,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:58,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:58,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-04T15:22:58,941 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=43, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/3ccf86a2938d463b9899542dbbe7d366 2024-12-04T15:22:58,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/79ed233685b14c5091c96ca25d3978c0 is 50, key is test_row_0/B:col10/1733325777886/Put/seqid=0 2024-12-04T15:22:58,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742236_1412 (size=12001) 2024-12-04T15:22:58,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/79ed233685b14c5091c96ca25d3978c0 2024-12-04T15:22:58,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/a5baf56476f048158fa26575623df910 is 50, key is test_row_0/C:col10/1733325777886/Put/seqid=0 2024-12-04T15:22:58,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742237_1413 (size=12001) 2024-12-04T15:22:58,990 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/a5baf56476f048158fa26575623df910 2024-12-04T15:22:58,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/3ccf86a2938d463b9899542dbbe7d366 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/3ccf86a2938d463b9899542dbbe7d366 2024-12-04T15:22:58,997 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/3ccf86a2938d463b9899542dbbe7d366, entries=200, sequenceid=43, filesize=38.6 K 2024-12-04T15:22:58,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/79ed233685b14c5091c96ca25d3978c0 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/79ed233685b14c5091c96ca25d3978c0 2024-12-04T15:22:59,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/79ed233685b14c5091c96ca25d3978c0, entries=150, sequenceid=43, filesize=11.7 K 2024-12-04T15:22:59,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/a5baf56476f048158fa26575623df910 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/a5baf56476f048158fa26575623df910 2024-12-04T15:22:59,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/a5baf56476f048158fa26575623df910, entries=150, sequenceid=43, filesize=11.7 K 2024-12-04T15:22:59,008 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 96aa8a9c538d7176a93d416eb9d9bfac in 508ms, sequenceid=43, compaction requested=false 2024-12-04T15:22:59,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:22:59,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:59,024 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-04T15:22:59,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:22:59,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:59,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:22:59,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:22:59,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:22:59,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-04T15:22:59,031 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412042ac62c28f4f145b4bdfa1391bdb2ab1d_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325778506/Put/seqid=0 2024-12-04T15:22:59,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742238_1414 (size=14594) 2024-12-04T15:22:59,050 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:22:59,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:59,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:22:59,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:59,051 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:59,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:59,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:59,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325839126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325839131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325839132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46862 deadline: 1733325839133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325839133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,202 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:22:59,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:59,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:22:59,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:59,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:59,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:59,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:59,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325839234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325839237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325839239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46862 deadline: 1733325839240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325839240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,355 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:22:59,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:59,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:22:59,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:59,356 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:59,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:59,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:59,437 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:22:59,441 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412042ac62c28f4f145b4bdfa1391bdb2ab1d_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412042ac62c28f4f145b4bdfa1391bdb2ab1d_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:22:59,441 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/c09d8f40849f4609a393de78a9f9b701, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:22:59,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325839437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/c09d8f40849f4609a393de78a9f9b701 is 175, key is test_row_0/A:col10/1733325778506/Put/seqid=0 2024-12-04T15:22:59,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742239_1415 (size=39549) 2024-12-04T15:22:59,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325839442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,447 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325839442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46862 deadline: 1733325839445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325839445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,475 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-04T15:22:59,508 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:22:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:59,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:22:59,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:59,509 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:22:59,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:59,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:59,660 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,660 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:22:59,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:59,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:22:59,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:59,661 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:59,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:59,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:59,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325839743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325839749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325839749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325839750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:22:59,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46862 deadline: 1733325839751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,813 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:22:59,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:59,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:22:59,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:59,814 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:59,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:59,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:59,847 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/c09d8f40849f4609a393de78a9f9b701 2024-12-04T15:22:59,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/1024cc3ff2f34bc3b8dace57707ac27b is 50, key is test_row_0/B:col10/1733325778506/Put/seqid=0 2024-12-04T15:22:59,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742240_1416 (size=12001) 2024-12-04T15:22:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-04T15:22:59,966 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:22:59,966 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:22:59,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:59,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
as already flushing 2024-12-04T15:22:59,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:22:59,967 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:59,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:22:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:00,119 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:00,119 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:23:00,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:00,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:00,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:00,120 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:00,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:00,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:00,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:00,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325840245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:00,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:00,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325840252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:00,259 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/1024cc3ff2f34bc3b8dace57707ac27b 2024-12-04T15:23:00,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:00,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325840256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:00,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:00,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325840258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:00,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:00,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46862 deadline: 1733325840261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:00,272 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/4d7245d556264f88bbd81491ec11c59b is 50, key is test_row_0/C:col10/1733325778506/Put/seqid=0 2024-12-04T15:23:00,272 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:00,273 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:23:00,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:00,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:00,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:00,273 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:00,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:00,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:00,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742241_1417 (size=12001) 2024-12-04T15:23:00,425 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:00,426 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:23:00,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:00,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:00,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:00,426 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:00,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:00,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:00,579 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:00,580 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:23:00,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:00,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:00,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:00,580 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:00,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:00,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:00,693 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/4d7245d556264f88bbd81491ec11c59b 2024-12-04T15:23:00,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/c09d8f40849f4609a393de78a9f9b701 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/c09d8f40849f4609a393de78a9f9b701 2024-12-04T15:23:00,710 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/c09d8f40849f4609a393de78a9f9b701, entries=200, sequenceid=54, filesize=38.6 K 2024-12-04T15:23:00,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/1024cc3ff2f34bc3b8dace57707ac27b as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1024cc3ff2f34bc3b8dace57707ac27b 2024-12-04T15:23:00,715 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1024cc3ff2f34bc3b8dace57707ac27b, entries=150, sequenceid=54, filesize=11.7 K 2024-12-04T15:23:00,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/4d7245d556264f88bbd81491ec11c59b as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/4d7245d556264f88bbd81491ec11c59b 2024-12-04T15:23:00,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/4d7245d556264f88bbd81491ec11c59b, entries=150, sequenceid=54, filesize=11.7 K 2024-12-04T15:23:00,721 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 96aa8a9c538d7176a93d416eb9d9bfac in 1697ms, sequenceid=54, compaction requested=true 2024-12-04T15:23:00,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:00,722 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:00,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:A, priority=-2147483648, current 
under compaction store size is 1 2024-12-04T15:23:00,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:00,722 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:00,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:23:00,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:00,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:23:00,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:00,724 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110053 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:00,724 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/A is initiating minor compaction (all files) 2024-12-04T15:23:00,724 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/A in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:00,724 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/36817bd8bc2543fdb4557a56a7a58e7d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/3ccf86a2938d463b9899542dbbe7d366, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/c09d8f40849f4609a393de78a9f9b701] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=107.5 K 2024-12-04T15:23:00,724 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:00,724 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/36817bd8bc2543fdb4557a56a7a58e7d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/3ccf86a2938d463b9899542dbbe7d366, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/c09d8f40849f4609a393de78a9f9b701] 2024-12-04T15:23:00,724 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36817bd8bc2543fdb4557a56a7a58e7d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733325777837 2024-12-04T15:23:00,725 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ccf86a2938d463b9899542dbbe7d366, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1733325777886 2024-12-04T15:23:00,725 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:00,725 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/B is initiating minor compaction (all files) 2024-12-04T15:23:00,725 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/B in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:00,725 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/d7eeab28245041998d07f546bfe86365, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/79ed233685b14c5091c96ca25d3978c0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1024cc3ff2f34bc3b8dace57707ac27b] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=35.2 K 2024-12-04T15:23:00,726 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting c09d8f40849f4609a393de78a9f9b701, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733325778506 2024-12-04T15:23:00,726 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting d7eeab28245041998d07f546bfe86365, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733325777837 2024-12-04T15:23:00,726 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 79ed233685b14c5091c96ca25d3978c0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1733325777886 2024-12-04T15:23:00,727 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 
1024cc3ff2f34bc3b8dace57707ac27b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733325778506 2024-12-04T15:23:00,733 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:00,733 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-04T15:23:00,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:00,733 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-04T15:23:00,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:00,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:00,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:00,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:00,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:00,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:00,740 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:00,743 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#B#compaction#364 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:00,744 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/81388d8563f346f7b57e7172387e901f is 50, key is test_row_0/B:col10/1733325778506/Put/seqid=0 2024-12-04T15:23:00,748 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241204c970c6cbf49b46759cbbf8478554f95b_96aa8a9c538d7176a93d416eb9d9bfac store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:00,751 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241204c970c6cbf49b46759cbbf8478554f95b_96aa8a9c538d7176a93d416eb9d9bfac, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:00,751 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204c970c6cbf49b46759cbbf8478554f95b_96aa8a9c538d7176a93d416eb9d9bfac because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:00,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742242_1418 (size=12104) 2024-12-04T15:23:00,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204004742bfe52c4f609694f5e54e994c61_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325779067/Put/seqid=0 2024-12-04T15:23:00,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742243_1419 (size=4469) 2024-12-04T15:23:00,771 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#A#compaction#363 average throughput is 0.79 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:00,772 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/de1666a8facf4903908756dc23dc87d2 is 175, key is test_row_0/A:col10/1733325778506/Put/seqid=0 2024-12-04T15:23:00,775 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/81388d8563f346f7b57e7172387e901f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/81388d8563f346f7b57e7172387e901f 2024-12-04T15:23:00,781 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/B of 96aa8a9c538d7176a93d416eb9d9bfac into 81388d8563f346f7b57e7172387e901f(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:00,781 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:00,781 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/B, priority=13, startTime=1733325780722; duration=0sec 2024-12-04T15:23:00,781 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:00,781 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:B 2024-12-04T15:23:00,781 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:00,782 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:00,782 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/C is initiating minor compaction (all files) 2024-12-04T15:23:00,782 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/C in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:00,782 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/52358fa9c3244cce987ad0040dc9d26b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/a5baf56476f048158fa26575623df910, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/4d7245d556264f88bbd81491ec11c59b] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=35.2 K 2024-12-04T15:23:00,783 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 52358fa9c3244cce987ad0040dc9d26b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733325777837 2024-12-04T15:23:00,784 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting a5baf56476f048158fa26575623df910, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1733325777886 2024-12-04T15:23:00,789 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d7245d556264f88bbd81491ec11c59b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733325778506 2024-12-04T15:23:00,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742244_1420 (size=12154) 2024-12-04T15:23:00,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:00,799 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204004742bfe52c4f609694f5e54e994c61_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204004742bfe52c4f609694f5e54e994c61_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:00,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/6144d61a486549ab9b6ae479d4b433c5, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:00,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/6144d61a486549ab9b6ae479d4b433c5 is 175, key is test_row_0/A:col10/1733325779067/Put/seqid=0 2024-12-04T15:23:00,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742245_1421 (size=31058) 2024-12-04T15:23:00,825 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#C#compaction#366 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:00,825 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/de1666a8facf4903908756dc23dc87d2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/de1666a8facf4903908756dc23dc87d2 2024-12-04T15:23:00,825 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/c904ca0f481f474b87642395414201fb is 50, key is test_row_0/C:col10/1733325778506/Put/seqid=0 2024-12-04T15:23:00,832 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/A of 96aa8a9c538d7176a93d416eb9d9bfac into de1666a8facf4903908756dc23dc87d2(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:23:00,832 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:00,832 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/A, priority=13, startTime=1733325780722; duration=0sec 2024-12-04T15:23:00,832 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:00,832 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:A 2024-12-04T15:23:00,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742246_1422 (size=30955) 2024-12-04T15:23:00,843 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/6144d61a486549ab9b6ae479d4b433c5 2024-12-04T15:23:00,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/1e5fdb41dd2f46c294fec8393610276e is 50, key is test_row_0/B:col10/1733325779067/Put/seqid=0 2024-12-04T15:23:00,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742247_1423 (size=12104) 2024-12-04T15:23:00,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742248_1424 (size=12001) 2024-12-04T15:23:00,881 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/c904ca0f481f474b87642395414201fb as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/c904ca0f481f474b87642395414201fb 2024-12-04T15:23:00,888 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/C of 96aa8a9c538d7176a93d416eb9d9bfac into c904ca0f481f474b87642395414201fb(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:23:00,888 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:00,888 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/C, priority=13, startTime=1733325780723; duration=0sec 2024-12-04T15:23:00,888 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:00,888 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:C 2024-12-04T15:23:01,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:01,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:01,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:01,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325841271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:01,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:01,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325841271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:01,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:01,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46862 deadline: 1733325841272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:01,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:01,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325841272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:01,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:01,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325841273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:01,279 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/1e5fdb41dd2f46c294fec8393610276e 2024-12-04T15:23:01,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/ac814908fbfe439a8b764455eb76cf9c is 50, key is test_row_0/C:col10/1733325779067/Put/seqid=0 2024-12-04T15:23:01,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742249_1425 (size=12001) 2024-12-04T15:23:01,295 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/ac814908fbfe439a8b764455eb76cf9c 2024-12-04T15:23:01,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/6144d61a486549ab9b6ae479d4b433c5 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/6144d61a486549ab9b6ae479d4b433c5 2024-12-04T15:23:01,305 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/6144d61a486549ab9b6ae479d4b433c5, entries=150, sequenceid=79, filesize=30.2 K 2024-12-04T15:23:01,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/1e5fdb41dd2f46c294fec8393610276e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1e5fdb41dd2f46c294fec8393610276e 2024-12-04T15:23:01,311 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1e5fdb41dd2f46c294fec8393610276e, entries=150, sequenceid=79, filesize=11.7 K 2024-12-04T15:23:01,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/ac814908fbfe439a8b764455eb76cf9c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/ac814908fbfe439a8b764455eb76cf9c 2024-12-04T15:23:01,315 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/ac814908fbfe439a8b764455eb76cf9c, entries=150, sequenceid=79, filesize=11.7 K 2024-12-04T15:23:01,316 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 96aa8a9c538d7176a93d416eb9d9bfac in 583ms, sequenceid=79, compaction requested=false 2024-12-04T15:23:01,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:01,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:01,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-04T15:23:01,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-04T15:23:01,319 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-04T15:23:01,319 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.4940 sec 2024-12-04T15:23:01,321 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 3.5010 sec 2024-12-04T15:23:01,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:01,380 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-04T15:23:01,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:01,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:01,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:01,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:01,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:01,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:01,394 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204c87148fef8114a848634fe064891ac85_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325781265/Put/seqid=0 2024-12-04T15:23:01,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742250_1426 (size=17034) 2024-12-04T15:23:01,398 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:01,403 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204c87148fef8114a848634fe064891ac85_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204c87148fef8114a848634fe064891ac85_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:01,405 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/a65aadcbaa6d4ea7b3d4b0c33e4138b0, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:01,405 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/a65aadcbaa6d4ea7b3d4b0c33e4138b0 is 175, key is test_row_0/A:col10/1733325781265/Put/seqid=0 2024-12-04T15:23:01,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742251_1427 (size=48139) 2024-12-04T15:23:01,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:01,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325841409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:01,419 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:01,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325841417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:01,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:01,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325841419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:01,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:01,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325841419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:01,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:01,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325841521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:01,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:01,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325841521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:01,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:01,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325841529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:01,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:01,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325841529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:01,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:01,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325841727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:01,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:01,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325841728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:01,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:01,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325841735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:01,742 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:01,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325841736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:01,811 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/a65aadcbaa6d4ea7b3d4b0c33e4138b0 2024-12-04T15:23:01,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/8c76d1f8ef654b0b98f1f43195ed9ea3 is 50, key is test_row_0/B:col10/1733325781265/Put/seqid=0 2024-12-04T15:23:01,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742252_1428 (size=12001) 2024-12-04T15:23:01,824 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/8c76d1f8ef654b0b98f1f43195ed9ea3 2024-12-04T15:23:01,832 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/949abcf55d0544b0b6aadbe3244738c0 is 50, key is test_row_0/C:col10/1733325781265/Put/seqid=0 2024-12-04T15:23:01,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742253_1429 (size=12001) 2024-12-04T15:23:01,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-04T15:23:01,928 INFO [Thread-1809 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-12-04T15:23:01,930 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:23:01,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-12-04T15:23:01,931 INFO 
[PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:23:01,932 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:23:01,932 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:23:01,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-04T15:23:02,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-04T15:23:02,036 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:02,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325842031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:02,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325842034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:02,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325842043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:02,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325842045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,084 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-04T15:23:02,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:02,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:02,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:02,085 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:02,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:02,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:02,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-04T15:23:02,237 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/949abcf55d0544b0b6aadbe3244738c0 2024-12-04T15:23:02,239 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-04T15:23:02,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:02,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:02,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
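The repeated RegionTooBusyException entries above come from HRegion.checkResources(), which rejects mutations while the region's memstore is over its blocking limit (the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; the 512.0 K figure here suggests this test runs with a deliberately small flush size). The following is a minimal client-side sketch of a put that backs off on that condition. The table, row, and column names are taken from the log entries; the retry loop, the backoff values, and the assumption that the exception surfaces directly to the caller rather than being absorbed by the client's own retry logic are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);   // rejected with RegionTooBusyException while the memstore
          break;            // is over its blocking limit, as in the entries above
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;        // give up after a few attempts
          }
          Thread.sleep(100L * attempt);  // back off so the pending flush can drain the memstore
        }
      }
    }
  }
}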
2024-12-04T15:23:02,240 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:02,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:02,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:02,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/a65aadcbaa6d4ea7b3d4b0c33e4138b0 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/a65aadcbaa6d4ea7b3d4b0c33e4138b0 2024-12-04T15:23:02,250 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/a65aadcbaa6d4ea7b3d4b0c33e4138b0, entries=250, sequenceid=95, filesize=47.0 K 2024-12-04T15:23:02,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/8c76d1f8ef654b0b98f1f43195ed9ea3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/8c76d1f8ef654b0b98f1f43195ed9ea3 2024-12-04T15:23:02,255 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/8c76d1f8ef654b0b98f1f43195ed9ea3, entries=150, sequenceid=95, filesize=11.7 K 2024-12-04T15:23:02,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/949abcf55d0544b0b6aadbe3244738c0 as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/949abcf55d0544b0b6aadbe3244738c0 2024-12-04T15:23:02,262 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/949abcf55d0544b0b6aadbe3244738c0, entries=150, sequenceid=95, filesize=11.7 K 2024-12-04T15:23:02,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 96aa8a9c538d7176a93d416eb9d9bfac in 884ms, sequenceid=95, compaction requested=true 2024-12-04T15:23:02,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:02,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:23:02,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:02,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:23:02,263 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:02,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:02,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:23:02,263 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:02,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:02,264 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110152 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:02,264 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/A is initiating minor compaction (all files) 2024-12-04T15:23:02,264 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:02,264 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/A in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:02,264 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/B is initiating minor compaction (all files) 2024-12-04T15:23:02,264 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/de1666a8facf4903908756dc23dc87d2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/6144d61a486549ab9b6ae479d4b433c5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/a65aadcbaa6d4ea7b3d4b0c33e4138b0] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=107.6 K 2024-12-04T15:23:02,264 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:02,264 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/de1666a8facf4903908756dc23dc87d2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/6144d61a486549ab9b6ae479d4b433c5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/a65aadcbaa6d4ea7b3d4b0c33e4138b0] 2024-12-04T15:23:02,265 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting de1666a8facf4903908756dc23dc87d2, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733325778506 2024-12-04T15:23:02,265 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6144d61a486549ab9b6ae479d4b433c5, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733325779067 2024-12-04T15:23:02,265 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/B in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
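The compactions being selected here were chosen by ExploringCompactionPolicy as soon as three store files became eligible, and the "16 blocking" figure in the selection entries above is the per-store blocking file count. The sketch below shows the configuration knobs those two numbers come from; the values are HBase defaults (hbase.hstore.compactionThreshold is also spelled hbase.hstore.compaction.min in current releases), not settings read from this test, so treat it as an illustration of where the thresholds originate rather than a record of this run's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholdSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is considered;
    // the "Selecting compaction from 3 store files" entries fire once this is reached.
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    // Store file count at which further flushes are held back until compaction
    // catches up; this is the "16 blocking" figure in the selection entries.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction threshold = "
        + conf.getInt("hbase.hstore.compactionThreshold", -1)
        + ", blocking store files = "
        + conf.getInt("hbase.hstore.blockingStoreFiles", -1));
  }
}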
2024-12-04T15:23:02,265 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/81388d8563f346f7b57e7172387e901f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1e5fdb41dd2f46c294fec8393610276e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/8c76d1f8ef654b0b98f1f43195ed9ea3] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=35.3 K 2024-12-04T15:23:02,266 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a65aadcbaa6d4ea7b3d4b0c33e4138b0, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733325781265 2024-12-04T15:23:02,267 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 81388d8563f346f7b57e7172387e901f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733325778506 2024-12-04T15:23:02,268 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e5fdb41dd2f46c294fec8393610276e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733325779067 2024-12-04T15:23:02,269 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c76d1f8ef654b0b98f1f43195ed9ea3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733325781265 2024-12-04T15:23:02,277 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:02,279 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#B#compaction#373 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:02,279 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412043d6d45389e4e4f90b06a11f5aea76ea5_96aa8a9c538d7176a93d416eb9d9bfac store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:02,279 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/36cb77704df048a18ba4d2c8a46b12f9 is 50, key is test_row_0/B:col10/1733325781265/Put/seqid=0 2024-12-04T15:23:02,281 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412043d6d45389e4e4f90b06a11f5aea76ea5_96aa8a9c538d7176a93d416eb9d9bfac, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:02,281 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412043d6d45389e4e4f90b06a11f5aea76ea5_96aa8a9c538d7176a93d416eb9d9bfac because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:02,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742254_1430 (size=12207) 2024-12-04T15:23:02,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742255_1431 (size=4469) 2024-12-04T15:23:02,292 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#A#compaction#372 average throughput is 1.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:02,293 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/7b720c7f5c8a4a6c945ae1a549a84a81 is 175, key is test_row_0/A:col10/1733325781265/Put/seqid=0 2024-12-04T15:23:02,294 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/36cb77704df048a18ba4d2c8a46b12f9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/36cb77704df048a18ba4d2c8a46b12f9 2024-12-04T15:23:02,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742256_1432 (size=31161) 2024-12-04T15:23:02,300 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/B of 96aa8a9c538d7176a93d416eb9d9bfac into 36cb77704df048a18ba4d2c8a46b12f9(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:02,300 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:02,300 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/B, priority=13, startTime=1733325782263; duration=0sec 2024-12-04T15:23:02,300 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:02,300 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:B 2024-12-04T15:23:02,301 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:02,303 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:02,304 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/C is initiating minor compaction (all files) 2024-12-04T15:23:02,304 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/C in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:02,304 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/c904ca0f481f474b87642395414201fb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/ac814908fbfe439a8b764455eb76cf9c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/949abcf55d0544b0b6aadbe3244738c0] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=35.3 K 2024-12-04T15:23:02,305 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting c904ca0f481f474b87642395414201fb, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733325778506 2024-12-04T15:23:02,306 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting ac814908fbfe439a8b764455eb76cf9c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733325779067 2024-12-04T15:23:02,306 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 949abcf55d0544b0b6aadbe3244738c0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733325781265 2024-12-04T15:23:02,309 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/7b720c7f5c8a4a6c945ae1a549a84a81 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/7b720c7f5c8a4a6c945ae1a549a84a81 2024-12-04T15:23:02,317 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/A of 96aa8a9c538d7176a93d416eb9d9bfac into 7b720c7f5c8a4a6c945ae1a549a84a81(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:23:02,317 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:02,317 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/A, priority=13, startTime=1733325782263; duration=0sec 2024-12-04T15:23:02,317 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:02,317 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:A 2024-12-04T15:23:02,321 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#C#compaction#374 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:02,322 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/684a7becfc6f4c7cb17a71a74d5793af is 50, key is test_row_0/C:col10/1733325781265/Put/seqid=0 2024-12-04T15:23:02,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742257_1433 (size=12207) 2024-12-04T15:23:02,339 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/684a7becfc6f4c7cb17a71a74d5793af as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/684a7becfc6f4c7cb17a71a74d5793af 2024-12-04T15:23:02,345 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/C of 96aa8a9c538d7176a93d416eb9d9bfac into 684a7becfc6f4c7cb17a71a74d5793af(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
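The region flush that finally proceeds in the entries that follow is the server side of the FlushTableProcedure (pid=106) stored by the master earlier, which is what an Admin-initiated flush of the table looks like from a client. A minimal sketch, assuming only a default client configuration pointing at this cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; on the server this turns
      // into a FlushTableProcedure with FlushRegionProcedure children, the same
      // pid=106 / pid=107 pair visible in the surrounding log entries.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}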
2024-12-04T15:23:02,345 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:02,345 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/C, priority=13, startTime=1733325782263; duration=0sec 2024-12-04T15:23:02,345 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:02,345 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:C 2024-12-04T15:23:02,396 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,397 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-04T15:23:02,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:02,398 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-04T15:23:02,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:02,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:02,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:02,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:02,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:02,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:02,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120401394fc385be4670b57000dab1005237_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325781405/Put/seqid=0 2024-12-04T15:23:02,412 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742258_1434 (size=12154) 2024-12-04T15:23:02,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-04T15:23:02,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:02,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:02,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:02,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325842554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:02,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325842555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:02,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325842558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:02,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325842559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:02,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325842660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:02,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325842661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:02,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325842662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:02,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325842666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:02,818 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120401394fc385be4670b57000dab1005237_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120401394fc385be4670b57000dab1005237_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:02,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/da6ee0c75ed043a0815f30c5843aee85, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:02,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/da6ee0c75ed043a0815f30c5843aee85 is 175, key is test_row_0/A:col10/1733325781405/Put/seqid=0 2024-12-04T15:23:02,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742259_1435 (size=30955) 2024-12-04T15:23:02,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:02,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325842865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:02,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325842867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:02,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325842867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:02,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:02,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325842871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:03,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-04T15:23:03,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:03,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325843173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:03,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:03,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325843173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:03,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:03,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325843175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:03,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:03,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325843177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:03,236 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=121, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/da6ee0c75ed043a0815f30c5843aee85 2024-12-04T15:23:03,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/7e91d9f45d664b488715528c78b424ee is 50, key is test_row_0/B:col10/1733325781405/Put/seqid=0 2024-12-04T15:23:03,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742260_1436 (size=12001) 2024-12-04T15:23:03,281 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/7e91d9f45d664b488715528c78b424ee 2024-12-04T15:23:03,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/7a291a18a4cb4f308670a09a159e2a82 is 50, key is 
test_row_0/C:col10/1733325781405/Put/seqid=0 2024-12-04T15:23:03,306 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:03,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46862 deadline: 1733325843303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:03,308 DEBUG [Thread-1801 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4176 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., hostname=645c2dbfef2e,42169,1733325683856, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, 
server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at 
org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:23:03,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742261_1437 (size=12001) 2024-12-04T15:23:03,318 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/7a291a18a4cb4f308670a09a159e2a82 2024-12-04T15:23:03,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/da6ee0c75ed043a0815f30c5843aee85 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/da6ee0c75ed043a0815f30c5843aee85 2024-12-04T15:23:03,335 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/da6ee0c75ed043a0815f30c5843aee85, entries=150, sequenceid=121, filesize=30.2 K 2024-12-04T15:23:03,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/7e91d9f45d664b488715528c78b424ee as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7e91d9f45d664b488715528c78b424ee 2024-12-04T15:23:03,339 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7e91d9f45d664b488715528c78b424ee, entries=150, sequenceid=121, filesize=11.7 K 2024-12-04T15:23:03,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/7a291a18a4cb4f308670a09a159e2a82 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/7a291a18a4cb4f308670a09a159e2a82 2024-12-04T15:23:03,359 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/7a291a18a4cb4f308670a09a159e2a82, entries=150, sequenceid=121, filesize=11.7 K 2024-12-04T15:23:03,360 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 96aa8a9c538d7176a93d416eb9d9bfac in 961ms, sequenceid=121, compaction requested=false 2024-12-04T15:23:03,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:03,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:03,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-12-04T15:23:03,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-12-04T15:23:03,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-12-04T15:23:03,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4290 sec 2024-12-04T15:23:03,364 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 1.4330 sec 2024-12-04T15:23:03,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:03,688 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-04T15:23:03,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:03,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:03,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:03,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:03,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:03,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:03,735 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204b8706a6d514e4268b7d94a2423d7cf81_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325783682/Put/seqid=0 
2024-12-04T15:23:03,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:03,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325843752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:03,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:03,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325843753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:03,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:03,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325843759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:03,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:03,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325843763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:03,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742262_1438 (size=14794) 2024-12-04T15:23:03,784 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:03,793 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204b8706a6d514e4268b7d94a2423d7cf81_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204b8706a6d514e4268b7d94a2423d7cf81_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:03,794 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/92092c1f3af945d088ce7bd732245a83, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:03,795 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/92092c1f3af945d088ce7bd732245a83 is 175, key is test_row_0/A:col10/1733325783682/Put/seqid=0 2024-12-04T15:23:03,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742263_1439 (size=39749) 2024-12-04T15:23:03,838 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=136, memsize=26.8 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/92092c1f3af945d088ce7bd732245a83 2024-12-04T15:23:03,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/330396111f574a768e4e28a1280f1e0c is 50, key is test_row_0/B:col10/1733325783682/Put/seqid=0 2024-12-04T15:23:03,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:03,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325843864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:03,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:03,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325843864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:03,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742264_1440 (size=12151) 2024-12-04T15:23:03,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:03,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325843878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:03,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:03,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325843878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:03,892 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/330396111f574a768e4e28a1280f1e0c 2024-12-04T15:23:03,909 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/3350d1cc23024b38aa475de4c79ce3f9 is 50, key is test_row_0/C:col10/1733325783682/Put/seqid=0 2024-12-04T15:23:03,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742265_1441 (size=12151) 2024-12-04T15:23:03,932 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/3350d1cc23024b38aa475de4c79ce3f9 2024-12-04T15:23:03,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/92092c1f3af945d088ce7bd732245a83 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/92092c1f3af945d088ce7bd732245a83 2024-12-04T15:23:03,961 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/92092c1f3af945d088ce7bd732245a83, entries=200, sequenceid=136, filesize=38.8 K 2024-12-04T15:23:03,962 DEBUG [MemStoreFlusher.0 
{}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/330396111f574a768e4e28a1280f1e0c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/330396111f574a768e4e28a1280f1e0c 2024-12-04T15:23:03,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/330396111f574a768e4e28a1280f1e0c, entries=150, sequenceid=136, filesize=11.9 K 2024-12-04T15:23:03,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/3350d1cc23024b38aa475de4c79ce3f9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3350d1cc23024b38aa475de4c79ce3f9 2024-12-04T15:23:03,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3350d1cc23024b38aa475de4c79ce3f9, entries=150, sequenceid=136, filesize=11.9 K 2024-12-04T15:23:03,976 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 96aa8a9c538d7176a93d416eb9d9bfac in 287ms, sequenceid=136, compaction requested=true 2024-12-04T15:23:03,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:03,976 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:03,977 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:03,977 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/A is initiating minor compaction (all files) 2024-12-04T15:23:03,977 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/A in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
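For context on the selection messages above (SortedCompactionPolicy and ExploringCompactionPolicy picking 3 of 3 eligible store files), the sketch below illustrates the general idea of ratio-based minor-compaction selection. It is a greatly simplified illustration, not the actual HBase policy code: the byte sizes are approximations reconstructed from the 30.4 K / 30.2 K / 38.8 K figures in the log, and 1.2 is only the commonly cited default for hbase.hstore.compaction.ratio.

import java.util.ArrayList;
import java.util.List;

/**
 * Simplified ratio-based selection: a file is kept as a compaction candidate only if
 * its size is no larger than (sum of the other candidates) * ratio. This is not the
 * real ExploringCompactionPolicy, just an illustration of why three similar-sized
 * files end up selected together.
 */
public class RatioSelectionSketch {
  static List<Long> select(List<Long> fileSizes, double ratio) {
    List<Long> selected = new ArrayList<>();
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      long sumOfOthers = total - size;          // size of everything except this file
      if (size <= sumOfOthers * ratio) {
        selected.add(size);                     // similar-sized file: worth compacting together
      }
    }
    return selected;
  }

  public static void main(String[] args) {
    // Approximate store-A file sizes from the log, in bytes (30.4 K, 30.2 K, 38.8 K).
    List<Long> sizes = List.of(31_130L, 30_925L, 39_731L);
    System.out.println(select(sizes, 1.2));     // all three pass the ratio check
  }
}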
2024-12-04T15:23:03,977 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/7b720c7f5c8a4a6c945ae1a549a84a81, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/da6ee0c75ed043a0815f30c5843aee85, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/92092c1f3af945d088ce7bd732245a83] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=99.5 K 2024-12-04T15:23:03,977 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:03,977 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/7b720c7f5c8a4a6c945ae1a549a84a81, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/da6ee0c75ed043a0815f30c5843aee85, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/92092c1f3af945d088ce7bd732245a83] 2024-12-04T15:23:03,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:23:03,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:03,978 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:03,978 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b720c7f5c8a4a6c945ae1a549a84a81, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733325781265 2024-12-04T15:23:03,979 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting da6ee0c75ed043a0815f30c5843aee85, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1733325781405 2024-12-04T15:23:03,979 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92092c1f3af945d088ce7bd732245a83, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733325782553 2024-12-04T15:23:03,980 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:03,980 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/B is initiating minor compaction (all files) 2024-12-04T15:23:03,980 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/B in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:03,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:23:03,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:03,980 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/36cb77704df048a18ba4d2c8a46b12f9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7e91d9f45d664b488715528c78b424ee, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/330396111f574a768e4e28a1280f1e0c] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=35.5 K 2024-12-04T15:23:03,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:23:03,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:03,980 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 36cb77704df048a18ba4d2c8a46b12f9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733325781265 2024-12-04T15:23:03,981 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e91d9f45d664b488715528c78b424ee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1733325781405 2024-12-04T15:23:03,981 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 330396111f574a768e4e28a1280f1e0c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733325782558 2024-12-04T15:23:03,996 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:04,013 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
96aa8a9c538d7176a93d416eb9d9bfac#B#compaction#382 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:04,013 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/3f0e7d8113424af7beaa4e2dc209e078 is 50, key is test_row_0/B:col10/1733325783682/Put/seqid=0 2024-12-04T15:23:04,024 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412048fe17aa780b54e7dbc287e40bef8572e_96aa8a9c538d7176a93d416eb9d9bfac store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:04,027 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412048fe17aa780b54e7dbc287e40bef8572e_96aa8a9c538d7176a93d416eb9d9bfac, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:04,027 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412048fe17aa780b54e7dbc287e40bef8572e_96aa8a9c538d7176a93d416eb9d9bfac because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:04,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-04T15:23:04,037 INFO [Thread-1809 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-12-04T15:23:04,039 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:23:04,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-12-04T15:23:04,040 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:23:04,041 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:23:04,041 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:23:04,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-04T15:23:04,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to 
blk_1073742266_1442 (size=12459) 2024-12-04T15:23:04,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742267_1443 (size=4469) 2024-12-04T15:23:04,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:04,075 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-04T15:23:04,076 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#A#compaction#381 average throughput is 0.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:04,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:04,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:04,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:04,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:04,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:04,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:04,077 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/f4f1c95695fa4b68aeecdfcadb0c32bf is 175, key is test_row_0/A:col10/1733325783682/Put/seqid=0 2024-12-04T15:23:04,093 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204918d855970a14a60ad48615a37776966_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325784073/Put/seqid=0 2024-12-04T15:23:04,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742268_1444 (size=31413) 2024-12-04T15:23:04,115 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/f4f1c95695fa4b68aeecdfcadb0c32bf as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/f4f1c95695fa4b68aeecdfcadb0c32bf 2024-12-04T15:23:04,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:04,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325844109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:04,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325844111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:04,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325844116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:04,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325844118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,128 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/A of 96aa8a9c538d7176a93d416eb9d9bfac into f4f1c95695fa4b68aeecdfcadb0c32bf(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:04,128 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:04,129 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/A, priority=13, startTime=1733325783976; duration=0sec 2024-12-04T15:23:04,129 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:04,129 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:A 2024-12-04T15:23:04,129 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:04,130 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:04,130 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/C is initiating minor compaction (all files) 2024-12-04T15:23:04,131 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/C in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:04,131 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/684a7becfc6f4c7cb17a71a74d5793af, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/7a291a18a4cb4f308670a09a159e2a82, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3350d1cc23024b38aa475de4c79ce3f9] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=35.5 K 2024-12-04T15:23:04,131 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 684a7becfc6f4c7cb17a71a74d5793af, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733325781265 2024-12-04T15:23:04,131 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a291a18a4cb4f308670a09a159e2a82, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1733325781405 2024-12-04T15:23:04,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742269_1445 (size=12304) 2024-12-04T15:23:04,137 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3350d1cc23024b38aa475de4c79ce3f9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733325782558 2024-12-04T15:23:04,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-04T15:23:04,151 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#C#compaction#384 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:04,152 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/8a62868eaf7b4585b69c4917215dfb67 is 50, key is test_row_0/C:col10/1733325783682/Put/seqid=0 2024-12-04T15:23:04,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742270_1446 (size=12459) 2024-12-04T15:23:04,185 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/8a62868eaf7b4585b69c4917215dfb67 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/8a62868eaf7b4585b69c4917215dfb67 2024-12-04T15:23:04,195 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-04T15:23:04,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:04,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:04,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:04,198 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
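The FLUSH operations tracked above as procIds 106 and 108 ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees", the FlushTableProcedure, and the FlushRegionCallable that bails out with "already flushing") correspond to flush requests issued through the client Admin API. A minimal sketch of issuing such a request is shown below; it assumes a reachable cluster configuration on the classpath and reuses the table name from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; in this log that request
      // shows up as a FlushTableProcedure with a FlushRegionProcedure subprocedure.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}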
2024-12-04T15:23:04,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:04,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:04,199 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/C of 96aa8a9c538d7176a93d416eb9d9bfac into 8a62868eaf7b4585b69c4917215dfb67(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:04,199 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:04,199 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/C, priority=13, startTime=1733325783980; duration=0sec 2024-12-04T15:23:04,199 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:04,199 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:C 2024-12-04T15:23:04,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:04,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325844222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:04,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325844226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:04,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325844226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:04,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325844227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-04T15:23:04,350 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-04T15:23:04,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:04,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:04,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:04,351 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:04,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:04,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:04,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:04,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325844431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:04,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:04,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325844432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325844432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:04,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325844433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,460 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/3f0e7d8113424af7beaa4e2dc209e078 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/3f0e7d8113424af7beaa4e2dc209e078 2024-12-04T15:23:04,465 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/B of 96aa8a9c538d7176a93d416eb9d9bfac into 3f0e7d8113424af7beaa4e2dc209e078(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:23:04,465 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:04,466 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/B, priority=13, startTime=1733325783978; duration=0sec 2024-12-04T15:23:04,466 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:04,466 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:B 2024-12-04T15:23:04,504 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,504 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-04T15:23:04,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:04,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:04,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:04,507 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:04,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:04,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:04,537 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:04,555 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204918d855970a14a60ad48615a37776966_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204918d855970a14a60ad48615a37776966_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:04,556 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/42ccde258ae54b57a6a347140d5c772c, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:04,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/42ccde258ae54b57a6a347140d5c772c is 175, key is test_row_0/A:col10/1733325784073/Put/seqid=0 2024-12-04T15:23:04,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742271_1447 (size=31105) 2024-12-04T15:23:04,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-04T15:23:04,659 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-04T15:23:04,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:04,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:04,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:04,664 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:04,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:04,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:04,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:04,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325844738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:04,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325844741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:04,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325844741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:04,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325844744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,822 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,823 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-04T15:23:04,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:04,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:04,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:04,823 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:04,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:04,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:04,976 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:04,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-04T15:23:04,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:04,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:04,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:04,977 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:04,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:04,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:04,984 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=159, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/42ccde258ae54b57a6a347140d5c772c 2024-12-04T15:23:05,048 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/d9f5744d7bce4288a12585c3e3e8cbd1 is 50, key is test_row_0/B:col10/1733325784073/Put/seqid=0 2024-12-04T15:23:05,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742272_1448 (size=12151) 2024-12-04T15:23:05,144 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:05,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-04T15:23:05,157 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-04T15:23:05,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:05,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:05,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:05,158 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:05,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:05,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:05,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:05,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325845246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:05,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:05,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325845250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:05,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:05,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325845250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:05,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:05,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325845261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:05,312 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:05,316 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-04T15:23:05,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:05,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:05,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:05,317 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:05,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:05,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:05,480 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:05,484 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-04T15:23:05,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:05,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:05,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:05,488 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:05,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:05,492 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/d9f5744d7bce4288a12585c3e3e8cbd1 2024-12-04T15:23:05,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:05,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/64291b3037254b2daa5366a59828e9e7 is 50, key is test_row_0/C:col10/1733325784073/Put/seqid=0 2024-12-04T15:23:05,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742273_1449 (size=12151) 2024-12-04T15:23:05,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/64291b3037254b2daa5366a59828e9e7 2024-12-04T15:23:05,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/42ccde258ae54b57a6a347140d5c772c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/42ccde258ae54b57a6a347140d5c772c 2024-12-04T15:23:05,628 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/42ccde258ae54b57a6a347140d5c772c, entries=150, sequenceid=159, filesize=30.4 K 2024-12-04T15:23:05,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/d9f5744d7bce4288a12585c3e3e8cbd1 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/d9f5744d7bce4288a12585c3e3e8cbd1 2024-12-04T15:23:05,636 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/d9f5744d7bce4288a12585c3e3e8cbd1, entries=150, sequenceid=159, filesize=11.9 K 2024-12-04T15:23:05,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/64291b3037254b2daa5366a59828e9e7 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/64291b3037254b2daa5366a59828e9e7 2024-12-04T15:23:05,641 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/64291b3037254b2daa5366a59828e9e7, entries=150, sequenceid=159, filesize=11.9 K 2024-12-04T15:23:05,643 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection 
to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:05,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-04T15:23:05,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:05,645 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 96aa8a9c538d7176a93d416eb9d9bfac in 1569ms, sequenceid=159, compaction requested=false 2024-12-04T15:23:05,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:05,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:05,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:05,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:05,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:05,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:05,810 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:05,811 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-04T15:23:05,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:05,811 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-04T15:23:05,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:05,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:05,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:05,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:05,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:05,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:05,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120421cc1df9d0a947c0a4b4ae2300543f63_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325784116/Put/seqid=0 2024-12-04T15:23:05,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742274_1450 (size=12304) 2024-12-04T15:23:05,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:05,852 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120421cc1df9d0a947c0a4b4ae2300543f63_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120421cc1df9d0a947c0a4b4ae2300543f63_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:05,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/0aa30942c1834ff59ad942b9978e164f, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:05,854 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/0aa30942c1834ff59ad942b9978e164f is 175, key is test_row_0/A:col10/1733325784116/Put/seqid=0 2024-12-04T15:23:05,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742275_1451 (size=31105) 2024-12-04T15:23:06,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-04T15:23:06,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:06,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:06,269 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=175, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/0aa30942c1834ff59ad942b9978e164f 2024-12-04T15:23:06,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/86ec2511cf5741808aa02ec263fce3e3 is 50, key is test_row_0/B:col10/1733325784116/Put/seqid=0 2024-12-04T15:23:06,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742276_1452 (size=12151) 2024-12-04T15:23:06,296 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/86ec2511cf5741808aa02ec263fce3e3 2024-12-04T15:23:06,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:06,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325846299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:06,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/28fb4b67426b4989b70f7f267d71d536 is 50, key is test_row_0/C:col10/1733325784116/Put/seqid=0 2024-12-04T15:23:06,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:06,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325846302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:06,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:06,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325846305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:06,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:06,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325846316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:06,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742277_1453 (size=12151) 2024-12-04T15:23:06,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:06,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325846420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:06,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:06,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325846420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:06,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:06,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325846424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:06,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:06,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325846426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:06,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:06,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325846633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:06,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:06,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325846633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:06,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:06,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325846640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:06,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:06,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325846644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:06,728 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/28fb4b67426b4989b70f7f267d71d536 2024-12-04T15:23:06,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/0aa30942c1834ff59ad942b9978e164f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/0aa30942c1834ff59ad942b9978e164f 2024-12-04T15:23:06,752 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/0aa30942c1834ff59ad942b9978e164f, entries=150, sequenceid=175, filesize=30.4 K 2024-12-04T15:23:06,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/86ec2511cf5741808aa02ec263fce3e3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/86ec2511cf5741808aa02ec263fce3e3 2024-12-04T15:23:06,760 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/86ec2511cf5741808aa02ec263fce3e3, entries=150, sequenceid=175, filesize=11.9 K 2024-12-04T15:23:06,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/28fb4b67426b4989b70f7f267d71d536 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/28fb4b67426b4989b70f7f267d71d536 2024-12-04T15:23:06,786 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/28fb4b67426b4989b70f7f267d71d536, entries=150, sequenceid=175, filesize=11.9 K 2024-12-04T15:23:06,787 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 96aa8a9c538d7176a93d416eb9d9bfac in 976ms, sequenceid=175, compaction requested=true 2024-12-04T15:23:06,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:06,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:06,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-04T15:23:06,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-04T15:23:06,797 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-04T15:23:06,797 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7530 sec 2024-12-04T15:23:06,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 2.7580 sec 2024-12-04T15:23:06,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:06,952 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-04T15:23:06,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:06,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:06,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:06,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:06,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:06,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:06,981 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120471470eebbcac45b6b429a57d4c645dea_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325786301/Put/seqid=0 2024-12-04T15:23:06,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742278_1454 (size=12304) 2024-12-04T15:23:07,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:07,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325846992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:07,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:07,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325847018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:07,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:07,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325847018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:07,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:07,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325847024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:07,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:07,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325847132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:07,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:07,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325847134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:07,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:07,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325847134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:07,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:07,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325847135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:07,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:07,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46862 deadline: 1733325847342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:07,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:07,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325847343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:07,351 DEBUG [Thread-1801 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8219 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., hostname=645c2dbfef2e,42169,1733325683856, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:23:07,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:07,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325847343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:07,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:07,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325847344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:07,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:07,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325847344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:07,400 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:07,411 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120471470eebbcac45b6b429a57d4c645dea_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120471470eebbcac45b6b429a57d4c645dea_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:07,412 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/a2c1bab8a7b24c859960b6a13a55946d, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:07,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/a2c1bab8a7b24c859960b6a13a55946d is 175, key is test_row_0/A:col10/1733325786301/Put/seqid=0 2024-12-04T15:23:07,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742279_1455 (size=31105) 2024-12-04T15:23:07,430 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=199, 
memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/a2c1bab8a7b24c859960b6a13a55946d 2024-12-04T15:23:07,464 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/82e815d00ce444c1b58ac91cdf32e244 is 50, key is test_row_0/B:col10/1733325786301/Put/seqid=0 2024-12-04T15:23:07,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742280_1456 (size=12151) 2024-12-04T15:23:07,484 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/82e815d00ce444c1b58ac91cdf32e244 2024-12-04T15:23:07,506 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/bc8362d916fa4cda8dad8c9af022faf7 is 50, key is test_row_0/C:col10/1733325786301/Put/seqid=0 2024-12-04T15:23:07,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742281_1457 (size=12151) 2024-12-04T15:23:07,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:07,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325847652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:07,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:07,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325847660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:07,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:07,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325847660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:07,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:07,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325847672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:07,949 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/bc8362d916fa4cda8dad8c9af022faf7 2024-12-04T15:23:07,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/a2c1bab8a7b24c859960b6a13a55946d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/a2c1bab8a7b24c859960b6a13a55946d 2024-12-04T15:23:07,963 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/a2c1bab8a7b24c859960b6a13a55946d, entries=150, sequenceid=199, filesize=30.4 K 2024-12-04T15:23:07,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/82e815d00ce444c1b58ac91cdf32e244 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/82e815d00ce444c1b58ac91cdf32e244 2024-12-04T15:23:07,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/82e815d00ce444c1b58ac91cdf32e244, entries=150, sequenceid=199, filesize=11.9 K 2024-12-04T15:23:07,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/bc8362d916fa4cda8dad8c9af022faf7 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/bc8362d916fa4cda8dad8c9af022faf7 2024-12-04T15:23:07,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/bc8362d916fa4cda8dad8c9af022faf7, entries=150, sequenceid=199, filesize=11.9 K 2024-12-04T15:23:07,976 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 96aa8a9c538d7176a93d416eb9d9bfac in 1024ms, sequenceid=199, compaction requested=true 2024-12-04T15:23:07,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:07,976 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:07,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:23:07,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:07,977 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:07,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:23:07,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:07,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:23:07,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:07,978 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:23:07,978 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/B is initiating minor compaction (all files) 2024-12-04T15:23:07,978 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/B in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:07,978 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/3f0e7d8113424af7beaa4e2dc209e078, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/d9f5744d7bce4288a12585c3e3e8cbd1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/86ec2511cf5741808aa02ec263fce3e3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/82e815d00ce444c1b58ac91cdf32e244] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=47.8 K 2024-12-04T15:23:07,979 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124728 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:23:07,979 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f0e7d8113424af7beaa4e2dc209e078, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733325782558 2024-12-04T15:23:07,979 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/A is initiating minor compaction (all files) 2024-12-04T15:23:07,979 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/A in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:07,979 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/f4f1c95695fa4b68aeecdfcadb0c32bf, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/42ccde258ae54b57a6a347140d5c772c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/0aa30942c1834ff59ad942b9978e164f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/a2c1bab8a7b24c859960b6a13a55946d] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=121.8 K 2024-12-04T15:23:07,979 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:07,979 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting d9f5744d7bce4288a12585c3e3e8cbd1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733325783761 2024-12-04T15:23:07,979 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/f4f1c95695fa4b68aeecdfcadb0c32bf, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/42ccde258ae54b57a6a347140d5c772c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/0aa30942c1834ff59ad942b9978e164f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/a2c1bab8a7b24c859960b6a13a55946d] 2024-12-04T15:23:07,980 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 86ec2511cf5741808aa02ec263fce3e3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733325784088 2024-12-04T15:23:07,980 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4f1c95695fa4b68aeecdfcadb0c32bf, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733325782558 2024-12-04T15:23:07,980 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 82e815d00ce444c1b58ac91cdf32e244, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733325786301 2024-12-04T15:23:07,981 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42ccde258ae54b57a6a347140d5c772c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733325783761 2024-12-04T15:23:07,982 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0aa30942c1834ff59ad942b9978e164f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733325784088 2024-12-04T15:23:07,982 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2c1bab8a7b24c859960b6a13a55946d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733325786301 2024-12-04T15:23:07,992 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:07,993 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#B#compaction#393 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:07,995 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/c7fd48e0cea6448a92c3b41f3959147b is 50, key is test_row_0/B:col10/1733325786301/Put/seqid=0 2024-12-04T15:23:08,012 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120498628ebb08ac43e890eca925e28c2bf9_96aa8a9c538d7176a93d416eb9d9bfac store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:08,014 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120498628ebb08ac43e890eca925e28c2bf9_96aa8a9c538d7176a93d416eb9d9bfac, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:08,014 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120498628ebb08ac43e890eca925e28c2bf9_96aa8a9c538d7176a93d416eb9d9bfac because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:08,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742282_1458 (size=12595) 2024-12-04T15:23:08,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742283_1459 (size=4469) 2024-12-04T15:23:08,021 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#A#compaction#394 average throughput is 0.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:08,022 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/7a282a99985347529f0bc6753d8953e4 is 175, key is test_row_0/A:col10/1733325786301/Put/seqid=0 2024-12-04T15:23:08,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742284_1460 (size=31549) 2024-12-04T15:23:08,035 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/7a282a99985347529f0bc6753d8953e4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/7a282a99985347529f0bc6753d8953e4 2024-12-04T15:23:08,041 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/A of 96aa8a9c538d7176a93d416eb9d9bfac into 7a282a99985347529f0bc6753d8953e4(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:08,041 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:08,041 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/A, priority=12, startTime=1733325787976; duration=0sec 2024-12-04T15:23:08,041 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:08,041 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:A 2024-12-04T15:23:08,041 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:08,042 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:23:08,042 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/C is initiating minor compaction (all files) 2024-12-04T15:23:08,042 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/C in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:08,043 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/8a62868eaf7b4585b69c4917215dfb67, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/64291b3037254b2daa5366a59828e9e7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/28fb4b67426b4989b70f7f267d71d536, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/bc8362d916fa4cda8dad8c9af022faf7] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=47.8 K 2024-12-04T15:23:08,043 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a62868eaf7b4585b69c4917215dfb67, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733325782558 2024-12-04T15:23:08,044 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64291b3037254b2daa5366a59828e9e7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733325783761 2024-12-04T15:23:08,044 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28fb4b67426b4989b70f7f267d71d536, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733325784088 2024-12-04T15:23:08,045 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc8362d916fa4cda8dad8c9af022faf7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733325786301 2024-12-04T15:23:08,058 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#C#compaction#395 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-04T15:23:08,059 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/6c0eb835fbba48c6a13ddce0ea526b0c is 50, key is test_row_0/C:col10/1733325786301/Put/seqid=0 2024-12-04T15:23:08,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742285_1461 (size=12595) 2024-12-04T15:23:08,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-04T15:23:08,158 INFO [Thread-1809 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-04T15:23:08,160 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:23:08,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-12-04T15:23:08,162 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:23:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-04T15:23:08,163 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:23:08,163 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:23:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:08,168 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-04T15:23:08,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:08,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:08,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:08,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:08,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:08,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-04T15:23:08,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204802717efd0184cbe9f89404241d5e8e4_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325786966/Put/seqid=0 2024-12-04T15:23:08,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742286_1462 (size=14794) 2024-12-04T15:23:08,200 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:08,207 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204802717efd0184cbe9f89404241d5e8e4_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204802717efd0184cbe9f89404241d5e8e4_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:08,209 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/5640154e81ab4ac5a86aada16243023c, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:08,209 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/5640154e81ab4ac5a86aada16243023c is 175, key is test_row_0/A:col10/1733325786966/Put/seqid=0 2024-12-04T15:23:08,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325848213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325848215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325848222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325848222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742287_1463 (size=39749) 2024-12-04T15:23:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-04T15:23:08,317 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-04T15:23:08,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:08,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:08,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:08,318 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:08,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:08,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:08,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325848324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325848324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325848332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325848332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,425 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/c7fd48e0cea6448a92c3b41f3959147b as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/c7fd48e0cea6448a92c3b41f3959147b 2024-12-04T15:23:08,433 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/B of 96aa8a9c538d7176a93d416eb9d9bfac into c7fd48e0cea6448a92c3b41f3959147b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:23:08,433 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:08,433 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/B, priority=12, startTime=1733325787977; duration=0sec 2024-12-04T15:23:08,434 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:08,434 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:B 2024-12-04T15:23:08,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-04T15:23:08,472 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,473 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/6c0eb835fbba48c6a13ddce0ea526b0c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/6c0eb835fbba48c6a13ddce0ea526b0c 2024-12-04T15:23:08,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-04T15:23:08,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:08,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:08,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:08,476 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:08,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:08,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:08,478 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/C of 96aa8a9c538d7176a93d416eb9d9bfac into 6c0eb835fbba48c6a13ddce0ea526b0c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:08,478 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:08,478 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/C, priority=12, startTime=1733325787977; duration=0sec 2024-12-04T15:23:08,478 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:08,478 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:C 2024-12-04T15:23:08,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325848529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325848532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325848538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325848539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,629 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-04T15:23:08,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:08,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:08,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:08,630 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:08,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:08,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:08,637 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/5640154e81ab4ac5a86aada16243023c 2024-12-04T15:23:08,647 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/16b23c115c534beb90d25209784fd388 is 50, key is test_row_0/B:col10/1733325786966/Put/seqid=0 2024-12-04T15:23:08,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742288_1464 (size=12151) 2024-12-04T15:23:08,662 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/16b23c115c534beb90d25209784fd388 2024-12-04T15:23:08,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/fe26157428fc403fbfa825e23f1fce32 is 50, key is test_row_0/C:col10/1733325786966/Put/seqid=0 2024-12-04T15:23:08,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742289_1465 (size=12151) 2024-12-04T15:23:08,677 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/fe26157428fc403fbfa825e23f1fce32 2024-12-04T15:23:08,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/5640154e81ab4ac5a86aada16243023c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/5640154e81ab4ac5a86aada16243023c 2024-12-04T15:23:08,690 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/5640154e81ab4ac5a86aada16243023c, entries=200, sequenceid=213, filesize=38.8 K 2024-12-04T15:23:08,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/16b23c115c534beb90d25209784fd388 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/16b23c115c534beb90d25209784fd388
2024-12-04T15:23:08,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/16b23c115c534beb90d25209784fd388, entries=150, sequenceid=213, filesize=11.9 K 2024-12-04T15:23:08,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/fe26157428fc403fbfa825e23f1fce32 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/fe26157428fc403fbfa825e23f1fce32 2024-12-04T15:23:08,705 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/fe26157428fc403fbfa825e23f1fce32, entries=150, sequenceid=213, filesize=11.9 K 2024-12-04T15:23:08,706 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 96aa8a9c538d7176a93d416eb9d9bfac in 539ms, sequenceid=213, compaction requested=false 2024-12-04T15:23:08,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-04T15:23:08,782 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-04T15:23:08,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.
2024-12-04T15:23:08,783 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-04T15:23:08,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:08,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:08,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:08,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:08,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:08,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:08,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412046400492c880b49018966167ddfddc996_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325788221/Put/seqid=0 2024-12-04T15:23:08,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742290_1466 (size=12304) 2024-12-04T15:23:08,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:08,799 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412046400492c880b49018966167ddfddc996_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412046400492c880b49018966167ddfddc996_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:08,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/e7f6717c0f5647feb66a1ce1774f9071, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac]
2024-12-04T15:23:08,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/e7f6717c0f5647feb66a1ce1774f9071 is 175, key is test_row_0/A:col10/1733325788221/Put/seqid=0 2024-12-04T15:23:08,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742291_1467 (size=31105) 2024-12-04T15:23:08,828 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=238, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/e7f6717c0f5647feb66a1ce1774f9071 2024-12-04T15:23:08,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/e9b90727d1da497eb579ac6e7877e5f0 is 50, key is test_row_0/B:col10/1733325788221/Put/seqid=0 2024-12-04T15:23:08,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:08,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:08,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742292_1468 (size=12151) 2024-12-04T15:23:08,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325848853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325848860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325848860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325848861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325848964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325848968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325848968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:08,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:08,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325848973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:09,175 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:09,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325849170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:09,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:09,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325849171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:09,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:09,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325849172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:09,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:09,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325849178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:09,242 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/e9b90727d1da497eb579ac6e7877e5f0 2024-12-04T15:23:09,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/3c98eb39e43e40cabbe8e7b4064c4fad is 50, key is test_row_0/C:col10/1733325788221/Put/seqid=0 2024-12-04T15:23:09,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-04T15:23:09,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742293_1469 (size=12151) 2024-12-04T15:23:09,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:09,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325849477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:09,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:09,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325849477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:09,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:09,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325849479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:09,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:09,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325849483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:09,678 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/3c98eb39e43e40cabbe8e7b4064c4fad 2024-12-04T15:23:09,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/e7f6717c0f5647feb66a1ce1774f9071 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e7f6717c0f5647feb66a1ce1774f9071 2024-12-04T15:23:09,689 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e7f6717c0f5647feb66a1ce1774f9071, entries=150, sequenceid=238, filesize=30.4 K 2024-12-04T15:23:09,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/e9b90727d1da497eb579ac6e7877e5f0 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/e9b90727d1da497eb579ac6e7877e5f0 2024-12-04T15:23:09,694 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/e9b90727d1da497eb579ac6e7877e5f0, entries=150, sequenceid=238, filesize=11.9 K 2024-12-04T15:23:09,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/3c98eb39e43e40cabbe8e7b4064c4fad as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3c98eb39e43e40cabbe8e7b4064c4fad 2024-12-04T15:23:09,701 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3c98eb39e43e40cabbe8e7b4064c4fad, entries=150, sequenceid=238, filesize=11.9 K 2024-12-04T15:23:09,702 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 96aa8a9c538d7176a93d416eb9d9bfac in 919ms, sequenceid=238, compaction requested=true 2024-12-04T15:23:09,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:09,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:09,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-12-04T15:23:09,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-12-04T15:23:09,707 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-04T15:23:09,707 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5410 sec 2024-12-04T15:23:09,709 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.5480 sec 2024-12-04T15:23:09,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:09,986 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-04T15:23:09,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:09,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:09,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:09,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:09,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:09,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:10,007 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204c41cc49f6f674d4cb8fa070dd4621ec5_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325788858/Put/seqid=0 2024-12-04T15:23:10,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742294_1470 (size=14794) 2024-12-04T15:23:10,031 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:10,035 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204c41cc49f6f674d4cb8fa070dd4621ec5_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204c41cc49f6f674d4cb8fa070dd4621ec5_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:10,036 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/206c312be65e4d5795b1000d5679474e, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:10,036 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/206c312be65e4d5795b1000d5679474e is 175, key is test_row_0/A:col10/1733325788858/Put/seqid=0 2024-12-04T15:23:10,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325850039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325850040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325850044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325850045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742295_1471 (size=39749) 2024-12-04T15:23:10,061 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=252, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/206c312be65e4d5795b1000d5679474e 2024-12-04T15:23:10,081 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/7c21c86a2fe44474b3b6562742120b2c is 50, key is test_row_0/B:col10/1733325788858/Put/seqid=0 2024-12-04T15:23:10,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742296_1472 (size=12151) 2024-12-04T15:23:10,100 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/7c21c86a2fe44474b3b6562742120b2c 2024-12-04T15:23:10,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/07f53bf378074ad9be9c12a14d321261 is 50, key is test_row_0/C:col10/1733325788858/Put/seqid=0 2024-12-04T15:23:10,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325850152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325850152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325850158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742297_1473 (size=12151) 2024-12-04T15:23:10,160 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/07f53bf378074ad9be9c12a14d321261 2024-12-04T15:23:10,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325850160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/206c312be65e4d5795b1000d5679474e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/206c312be65e4d5795b1000d5679474e 2024-12-04T15:23:10,182 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/206c312be65e4d5795b1000d5679474e, entries=200, sequenceid=252, filesize=38.8 K 2024-12-04T15:23:10,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/7c21c86a2fe44474b3b6562742120b2c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7c21c86a2fe44474b3b6562742120b2c 2024-12-04T15:23:10,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7c21c86a2fe44474b3b6562742120b2c, entries=150, sequenceid=252, filesize=11.9 K 2024-12-04T15:23:10,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/07f53bf378074ad9be9c12a14d321261 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/07f53bf378074ad9be9c12a14d321261 2024-12-04T15:23:10,191 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/07f53bf378074ad9be9c12a14d321261, entries=150, sequenceid=252, filesize=11.9 K 2024-12-04T15:23:10,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 
KB/130530 for 96aa8a9c538d7176a93d416eb9d9bfac in 205ms, sequenceid=252, compaction requested=true 2024-12-04T15:23:10,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:10,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:23:10,192 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:10,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:10,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:23:10,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:10,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:23:10,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:10,192 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:10,193 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:23:10,193 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142152 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:23:10,193 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/B is initiating minor compaction (all files) 2024-12-04T15:23:10,193 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/B in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:10,193 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/A is initiating minor compaction (all files) 2024-12-04T15:23:10,193 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/c7fd48e0cea6448a92c3b41f3959147b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/16b23c115c534beb90d25209784fd388, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/e9b90727d1da497eb579ac6e7877e5f0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7c21c86a2fe44474b3b6562742120b2c] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=47.9 K 2024-12-04T15:23:10,193 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/A in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:10,193 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/7a282a99985347529f0bc6753d8953e4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/5640154e81ab4ac5a86aada16243023c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e7f6717c0f5647feb66a1ce1774f9071, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/206c312be65e4d5795b1000d5679474e] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=138.8 K 2024-12-04T15:23:10,193 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:10,193 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/7a282a99985347529f0bc6753d8953e4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/5640154e81ab4ac5a86aada16243023c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e7f6717c0f5647feb66a1ce1774f9071, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/206c312be65e4d5795b1000d5679474e] 2024-12-04T15:23:10,194 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting c7fd48e0cea6448a92c3b41f3959147b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733325786301 2024-12-04T15:23:10,194 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a282a99985347529f0bc6753d8953e4, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733325786301 2024-12-04T15:23:10,194 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 16b23c115c534beb90d25209784fd388, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733325786966 2024-12-04T15:23:10,194 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5640154e81ab4ac5a86aada16243023c, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733325786966 2024-12-04T15:23:10,194 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting e9b90727d1da497eb579ac6e7877e5f0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733325788212 2024-12-04T15:23:10,194 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7f6717c0f5647feb66a1ce1774f9071, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733325788212 2024-12-04T15:23:10,195 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c21c86a2fe44474b3b6562742120b2c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733325788858 2024-12-04T15:23:10,195 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 206c312be65e4d5795b1000d5679474e, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733325788858 2024-12-04T15:23:10,225 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#B#compaction#405 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:10,226 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/b6925f80ee1c4ec99cb1368d1a555152 is 50, key is test_row_0/B:col10/1733325788858/Put/seqid=0 2024-12-04T15:23:10,236 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:10,245 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412048cf36f9cccba449aa5ded5a70ddafedc_96aa8a9c538d7176a93d416eb9d9bfac store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:10,247 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412048cf36f9cccba449aa5ded5a70ddafedc_96aa8a9c538d7176a93d416eb9d9bfac, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:10,248 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412048cf36f9cccba449aa5ded5a70ddafedc_96aa8a9c538d7176a93d416eb9d9bfac because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:10,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742298_1474 (size=12731) 2024-12-04T15:23:10,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-04T15:23:10,269 INFO [Thread-1809 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-04T15:23:10,272 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:23:10,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-12-04T15:23:10,274 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:23:10,274 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/b6925f80ee1c4ec99cb1368d1a555152 as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/b6925f80ee1c4ec99cb1368d1a555152 2024-12-04T15:23:10,274 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:23:10,274 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:23:10,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-04T15:23:10,279 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/B of 96aa8a9c538d7176a93d416eb9d9bfac into b6925f80ee1c4ec99cb1368d1a555152(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:10,279 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:10,279 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/B, priority=12, startTime=1733325790192; duration=0sec 2024-12-04T15:23:10,279 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:10,279 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:B 2024-12-04T15:23:10,279 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:10,280 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:23:10,280 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/C is initiating minor compaction (all files) 2024-12-04T15:23:10,280 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/C in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:10,280 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/6c0eb835fbba48c6a13ddce0ea526b0c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/fe26157428fc403fbfa825e23f1fce32, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3c98eb39e43e40cabbe8e7b4064c4fad, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/07f53bf378074ad9be9c12a14d321261] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=47.9 K 2024-12-04T15:23:10,281 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c0eb835fbba48c6a13ddce0ea526b0c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733325786301 2024-12-04T15:23:10,281 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting fe26157428fc403fbfa825e23f1fce32, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733325786966 2024-12-04T15:23:10,281 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c98eb39e43e40cabbe8e7b4064c4fad, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733325788212 2024-12-04T15:23:10,282 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 07f53bf378074ad9be9c12a14d321261, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733325788858 2024-12-04T15:23:10,292 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#C#compaction#407 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:10,293 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/4d6f8ec5814542589506e26b26a2ac3a is 50, key is test_row_0/C:col10/1733325788858/Put/seqid=0 2024-12-04T15:23:10,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742299_1475 (size=4469) 2024-12-04T15:23:10,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742300_1476 (size=12731) 2024-12-04T15:23:10,336 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/4d6f8ec5814542589506e26b26a2ac3a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/4d6f8ec5814542589506e26b26a2ac3a 2024-12-04T15:23:10,343 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/C of 96aa8a9c538d7176a93d416eb9d9bfac into 4d6f8ec5814542589506e26b26a2ac3a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:10,343 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:10,343 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/C, priority=12, startTime=1733325790192; duration=0sec 2024-12-04T15:23:10,343 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:10,343 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:C 2024-12-04T15:23:10,365 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-04T15:23:10,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:10,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:10,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:10,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:10,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:10,366 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:10,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:10,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-04T15:23:10,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204e52e6255edc94c32865a1b880503621d_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325790365/Put/seqid=0 2024-12-04T15:23:10,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325850381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325850382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325850382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325850384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742301_1477 (size=14994) 2024-12-04T15:23:10,400 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:10,408 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204e52e6255edc94c32865a1b880503621d_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204e52e6255edc94c32865a1b880503621d_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:10,411 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/857704ebb94749008f1043b9d07e8612, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:10,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/857704ebb94749008f1043b9d07e8612 is 175, key is test_row_0/A:col10/1733325790365/Put/seqid=0 2024-12-04T15:23:10,426 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,427 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:10,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 
{event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:10,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:10,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:10,427 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:10,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:10,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:10,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742302_1478 (size=39949) 2024-12-04T15:23:10,442 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=278, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/857704ebb94749008f1043b9d07e8612 2024-12-04T15:23:10,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/f3853f57af5440f8b41888edbcf44766 is 50, key is test_row_0/B:col10/1733325790365/Put/seqid=0 2024-12-04T15:23:10,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325850488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325850489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742303_1479 (size=12301) 2024-12-04T15:23:10,496 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/f3853f57af5440f8b41888edbcf44766 2024-12-04T15:23:10,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325850496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325850499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/3ffe7dc91db045d4a2eead8f649486e6 is 50, key is test_row_0/C:col10/1733325790365/Put/seqid=0 2024-12-04T15:23:10,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742304_1480 (size=12301) 2024-12-04T15:23:10,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-04T15:23:10,582 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,583 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:10,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:10,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:10,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:10,583 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:10,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:10,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:10,696 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#A#compaction#406 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:10,697 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/510a7708c2be4068bb400a3978c77f5d is 175, key is test_row_0/A:col10/1733325788858/Put/seqid=0 2024-12-04T15:23:10,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325850695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325850697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742305_1481 (size=31685) 2024-12-04T15:23:10,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325850704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:10,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325850704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,717 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/510a7708c2be4068bb400a3978c77f5d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/510a7708c2be4068bb400a3978c77f5d 2024-12-04T15:23:10,725 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/A of 96aa8a9c538d7176a93d416eb9d9bfac into 510a7708c2be4068bb400a3978c77f5d(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:10,725 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:10,725 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/A, priority=12, startTime=1733325790192; duration=0sec 2024-12-04T15:23:10,725 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:10,725 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:A 2024-12-04T15:23:10,741 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,748 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:10,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:10,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:10,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:10,751 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:10,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:10,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:10,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-04T15:23:10,904 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:10,904 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:10,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:10,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:10,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:10,905 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:10,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:10,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
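
The "Over memstore limit=512.0 K" warnings in this stretch of the log come from HRegion.checkResources: a region starts rejecting writes once its memstore grows past the flush size multiplied by the block multiplier. A minimal sketch of that arithmetic follows; both property names are real HBase settings, but the 128 KB flush size is only an assumption that would produce the 512 KB limit reported here when combined with the default multiplier of 4.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values: a 128 KB per-region flush trigger with the default
    // multiplier of 4 yields the 512 KB blocking limit seen in the warnings.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore size = " + (flushSize * multiplier) + " bytes"); // 524288
  }
}

Until a flush brings the memstore back under that limit, each Mutate call against the region is answered with RegionTooBusyException, which is exactly the pattern the CallRunner entries around this point record.
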
2024-12-04T15:23:10,964 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/3ffe7dc91db045d4a2eead8f649486e6 2024-12-04T15:23:10,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/857704ebb94749008f1043b9d07e8612 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/857704ebb94749008f1043b9d07e8612 2024-12-04T15:23:10,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/857704ebb94749008f1043b9d07e8612, entries=200, sequenceid=278, filesize=39.0 K 2024-12-04T15:23:10,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/f3853f57af5440f8b41888edbcf44766 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/f3853f57af5440f8b41888edbcf44766 2024-12-04T15:23:10,988 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/f3853f57af5440f8b41888edbcf44766, entries=150, sequenceid=278, filesize=12.0 K 2024-12-04T15:23:10,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/3ffe7dc91db045d4a2eead8f649486e6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3ffe7dc91db045d4a2eead8f649486e6 2024-12-04T15:23:11,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:11,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325851004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,009 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3ffe7dc91db045d4a2eead8f649486e6, entries=150, sequenceid=278, filesize=12.0 K 2024-12-04T15:23:11,010 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 96aa8a9c538d7176a93d416eb9d9bfac in 645ms, sequenceid=278, compaction requested=false 2024-12-04T15:23:11,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:11,014 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-04T15:23:11,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:11,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:11,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:11,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:11,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:11,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:11,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:11,050 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412047a87662cda5d4403a470a47a82787a24_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325791012/Put/seqid=0 2024-12-04T15:23:11,063 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:11,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:11,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:11,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:11,064 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:11,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:11,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
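
From the client's side, each rejected Mutate call (callId 138, 154, 146, ... in the CallRunner entries) surfaces as a RegionTooBusyException that can simply be retried once the flush catches up. The sketch below is illustrative only: the table, row, and column names mirror the log, while the bounded retry loop, the backoff values, and the assumption that the failure reaches the caller as a plain IOException (it may instead arrive wrapped in a retries-exhausted exception, depending on client retry settings) are assumptions, not taken from the test.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break; // accepted once the memstore is back under the blocking limit
        } catch (IOException e) {
          // e.g. RegionTooBusyException: Over memstore limit=512.0 K
          if (attempt >= 5) throw e;
          Thread.sleep(200L * attempt); // linear backoff while the flush completes
        }
      }
    }
  }
}

Note that the stock HBase client already pauses and retries most transient failures like this on its own; an explicit loop of this kind only matters once those built-in retries are exhausted.
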
2024-12-04T15:23:11,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742306_1482 (size=14994) 2024-12-04T15:23:11,071 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:11,076 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412047a87662cda5d4403a470a47a82787a24_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412047a87662cda5d4403a470a47a82787a24_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:11,078 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/22c8d0d3be084e2eb979ef3caf88b67f, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:11,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:11,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325851070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,079 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/22c8d0d3be084e2eb979ef3caf88b67f is 175, key is test_row_0/A:col10/1733325791012/Put/seqid=0 2024-12-04T15:23:11,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:11,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325851075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:11,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325851078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742307_1483 (size=39949) 2024-12-04T15:23:11,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:11,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325851180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:11,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325851187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:11,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325851188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,216 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:11,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:11,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:11,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:11,220 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:11,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:11,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:11,372 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,373 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:11,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:11,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:11,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:11,373 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:11,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:11,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
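
The pid=113 cycle that keeps repeating above is the master's remote flush procedure being re-dispatched: the region server declines because the MemStoreFlusher is already flushing the region, reports "Unable to complete flush", and the master dispatches the procedure again until the region is free. A table-level flush request of the following shape is the kind of call that typically drives such a procedure; the table name matches the test table in the log, while the surrounding setup is an assumed, minimal sketch rather than the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master fans this
      // out as RS_FLUSH_REGIONS procedures (like pid=113 here) and keeps retrying
      // any region that is still busy with an earlier flush.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

The "Checking to see if procedure is done pid=112" entries are the caller polling what is presumably the parent table-flush procedure while those per-region retries play out.
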
2024-12-04T15:23:11,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-04T15:23:11,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325851388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325851393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325851393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,504 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=292, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/22c8d0d3be084e2eb979ef3caf88b67f 2024-12-04T15:23:11,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:11,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325851512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,531 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,532 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:11,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:11,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:11,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:11,533 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:11,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:11,533 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/e8640d529078495fa491851e8b631a6c is 50, key is test_row_0/B:col10/1733325791012/Put/seqid=0 2024-12-04T15:23:11,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:11,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742308_1484 (size=12301) 2024-12-04T15:23:11,687 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:11,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:11,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:11,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:11,689 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:11,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:11,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:11,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:11,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325851704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:11,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325851708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:11,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325851712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,848 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:11,853 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:11,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:11,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:11,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:11,854 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:11,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:11,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:11,941 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/e8640d529078495fa491851e8b631a6c 2024-12-04T15:23:11,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/ab45264411b44a4ab77968135b348e55 is 50, key is test_row_0/C:col10/1733325791012/Put/seqid=0 2024-12-04T15:23:12,007 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:12,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:12,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:12,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:12,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:12,015 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:12,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:12,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:12,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742309_1485 (size=12301) 2024-12-04T15:23:12,172 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:12,174 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:12,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:12,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:12,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:12,175 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:12,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:12,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:12,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:12,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325852220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:12,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:12,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325852221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:12,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:12,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325852228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:12,332 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:12,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:12,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:12,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:12,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:12,333 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:12,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:12,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:12,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-04T15:23:12,425 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/ab45264411b44a4ab77968135b348e55 2024-12-04T15:23:12,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/22c8d0d3be084e2eb979ef3caf88b67f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/22c8d0d3be084e2eb979ef3caf88b67f 2024-12-04T15:23:12,456 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/22c8d0d3be084e2eb979ef3caf88b67f, entries=200, sequenceid=292, filesize=39.0 K 2024-12-04T15:23:12,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/e8640d529078495fa491851e8b631a6c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/e8640d529078495fa491851e8b631a6c 2024-12-04T15:23:12,484 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/e8640d529078495fa491851e8b631a6c, entries=150, sequenceid=292, filesize=12.0 K 2024-12-04T15:23:12,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/ab45264411b44a4ab77968135b348e55 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/ab45264411b44a4ab77968135b348e55 2024-12-04T15:23:12,492 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:12,496 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:12,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:12,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
as already flushing 2024-12-04T15:23:12,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:12,496 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:12,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:12,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:12,501 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/ab45264411b44a4ab77968135b348e55, entries=150, sequenceid=292, filesize=12.0 K 2024-12-04T15:23:12,502 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 96aa8a9c538d7176a93d416eb9d9bfac in 1488ms, sequenceid=292, compaction requested=true 2024-12-04T15:23:12,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:12,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:23:12,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:12,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:23:12,502 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:12,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:12,502 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:23:12,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:12,502 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:12,503 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111583 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:12,503 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/A is initiating minor compaction (all files) 2024-12-04T15:23:12,503 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/A in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:12,504 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/510a7708c2be4068bb400a3978c77f5d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/857704ebb94749008f1043b9d07e8612, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/22c8d0d3be084e2eb979ef3caf88b67f] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=109.0 K 2024-12-04T15:23:12,504 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:12,504 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/510a7708c2be4068bb400a3978c77f5d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/857704ebb94749008f1043b9d07e8612, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/22c8d0d3be084e2eb979ef3caf88b67f] 2024-12-04T15:23:12,504 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:12,504 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/B is initiating minor compaction (all files) 2024-12-04T15:23:12,504 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/B in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:12,504 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/b6925f80ee1c4ec99cb1368d1a555152, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/f3853f57af5440f8b41888edbcf44766, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/e8640d529078495fa491851e8b631a6c] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=36.5 K 2024-12-04T15:23:12,505 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 510a7708c2be4068bb400a3978c77f5d, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733325788858 2024-12-04T15:23:12,505 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting b6925f80ee1c4ec99cb1368d1a555152, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733325788858 2024-12-04T15:23:12,505 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 857704ebb94749008f1043b9d07e8612, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733325790037 2024-12-04T15:23:12,506 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting f3853f57af5440f8b41888edbcf44766, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733325790037 2024-12-04T15:23:12,506 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22c8d0d3be084e2eb979ef3caf88b67f, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733325790379 2024-12-04T15:23:12,506 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 
e8640d529078495fa491851e8b631a6c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733325790379 2024-12-04T15:23:12,515 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:12,519 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#B#compaction#415 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:12,520 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/1fc7133e5bd745ab9316927e09117c9d is 50, key is test_row_0/B:col10/1733325791012/Put/seqid=0 2024-12-04T15:23:12,527 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241204ec7438baa02a4975956d3ca64fa55d81_96aa8a9c538d7176a93d416eb9d9bfac store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:12,529 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241204ec7438baa02a4975956d3ca64fa55d81_96aa8a9c538d7176a93d416eb9d9bfac, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:12,529 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204ec7438baa02a4975956d3ca64fa55d81_96aa8a9c538d7176a93d416eb9d9bfac because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:12,545 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-04T15:23:12,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:12,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:12,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:12,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:12,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:12,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:12,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:12,564 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742311_1487 (size=4469) 2024-12-04T15:23:12,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742310_1486 (size=12983) 2024-12-04T15:23:12,573 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412043c11f130e31a4c459c6d41301b494bb2_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325791069/Put/seqid=0 2024-12-04T15:23:12,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:12,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325852614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:12,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742312_1488 (size=14994) 2024-12-04T15:23:12,653 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:12,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:12,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:12,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:12,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:12,655 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:12,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:12,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:12,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:12,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325852728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:12,818 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:12,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:12,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:12,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:12,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:12,820 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:12,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:12,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:12,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:12,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325852932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:12,969 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#A#compaction#414 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:12,969 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/e473ab8328f3453bb20e3f9e8b8cbbfd is 175, key is test_row_0/A:col10/1733325791012/Put/seqid=0 2024-12-04T15:23:12,975 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:12,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:12,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:12,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:12,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:12,976 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:12,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:12,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:13,000 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/1fc7133e5bd745ab9316927e09117c9d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1fc7133e5bd745ab9316927e09117c9d 2024-12-04T15:23:13,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742313_1489 (size=31937) 2024-12-04T15:23:13,025 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,029 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/B of 96aa8a9c538d7176a93d416eb9d9bfac into 1fc7133e5bd745ab9316927e09117c9d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:13,029 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:13,029 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/B, priority=13, startTime=1733325792502; duration=0sec 2024-12-04T15:23:13,029 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:13,029 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:B 2024-12-04T15:23:13,029 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:13,032 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:13,032 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/C is initiating minor compaction (all files) 2024-12-04T15:23:13,032 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/C in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:13,032 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/4d6f8ec5814542589506e26b26a2ac3a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3ffe7dc91db045d4a2eead8f649486e6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/ab45264411b44a4ab77968135b348e55] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=36.5 K 2024-12-04T15:23:13,032 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d6f8ec5814542589506e26b26a2ac3a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733325788858 2024-12-04T15:23:13,033 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412043c11f130e31a4c459c6d41301b494bb2_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412043c11f130e31a4c459c6d41301b494bb2_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:13,033 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ffe7dc91db045d4a2eead8f649486e6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733325790037 2024-12-04T15:23:13,033 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/e473ab8328f3453bb20e3f9e8b8cbbfd as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e473ab8328f3453bb20e3f9e8b8cbbfd 2024-12-04T15:23:13,034 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting ab45264411b44a4ab77968135b348e55, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733325790379 2024-12-04T15:23:13,035 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/93b89fc6c1144e198b03aed9d324d5ce, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:13,035 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/93b89fc6c1144e198b03aed9d324d5ce is 175, key is test_row_0/A:col10/1733325791069/Put/seqid=0 2024-12-04T15:23:13,046 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#C#compaction#417 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:13,046 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/577748ea24f04ab3a06fddb2166ba257 is 50, key is test_row_0/C:col10/1733325791012/Put/seqid=0 2024-12-04T15:23:13,049 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/A of 96aa8a9c538d7176a93d416eb9d9bfac into e473ab8328f3453bb20e3f9e8b8cbbfd(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:13,049 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:13,049 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/A, priority=13, startTime=1733325792502; duration=0sec 2024-12-04T15:23:13,049 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:13,049 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:A 2024-12-04T15:23:13,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742314_1490 (size=39949) 2024-12-04T15:23:13,094 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=316, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/93b89fc6c1144e198b03aed9d324d5ce 2024-12-04T15:23:13,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742315_1491 (size=12983) 2024-12-04T15:23:13,117 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/577748ea24f04ab3a06fddb2166ba257 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/577748ea24f04ab3a06fddb2166ba257 2024-12-04T15:23:13,121 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/C of 96aa8a9c538d7176a93d416eb9d9bfac into 577748ea24f04ab3a06fddb2166ba257(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:23:13,121 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:13,121 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/C, priority=13, startTime=1733325792502; duration=0sec 2024-12-04T15:23:13,121 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:13,121 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:C 2024-12-04T15:23:13,124 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/6f91e5dfc9a84854941787a1cdc871a7 is 50, key is test_row_0/B:col10/1733325791069/Put/seqid=0 2024-12-04T15:23:13,131 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:13,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:13,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:13,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:13,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:13,132 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:13,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:13,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742316_1492 (size=12301) 2024-12-04T15:23:13,153 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/6f91e5dfc9a84854941787a1cdc871a7 2024-12-04T15:23:13,171 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/985ebaa99f8b4f64bb7f46c8114e1e36 is 50, key is test_row_0/C:col10/1733325791069/Put/seqid=0 2024-12-04T15:23:13,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742317_1493 (size=12301) 2024-12-04T15:23:13,209 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/985ebaa99f8b4f64bb7f46c8114e1e36 2024-12-04T15:23:13,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/93b89fc6c1144e198b03aed9d324d5ce as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/93b89fc6c1144e198b03aed9d324d5ce 2024-12-04T15:23:13,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:13,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325853244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:13,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:13,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325853244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:13,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:13,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325853248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:13,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:13,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325853252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:13,268 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/93b89fc6c1144e198b03aed9d324d5ce, entries=200, sequenceid=316, filesize=39.0 K 2024-12-04T15:23:13,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,276 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/6f91e5dfc9a84854941787a1cdc871a7 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/6f91e5dfc9a84854941787a1cdc871a7 2024-12-04T15:23:13,285 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:13,290 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/6f91e5dfc9a84854941787a1cdc871a7, entries=150, sequenceid=316, filesize=12.0 K 2024-12-04T15:23:13,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-04T15:23:13,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:13,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:13,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:13,291 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:13,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:13,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/985ebaa99f8b4f64bb7f46c8114e1e36 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/985ebaa99f8b4f64bb7f46c8114e1e36 2024-12-04T15:23:13,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:13,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/985ebaa99f8b4f64bb7f46c8114e1e36, entries=150, sequenceid=316, filesize=12.0 K 2024-12-04T15:23:13,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,305 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 96aa8a9c538d7176a93d416eb9d9bfac in 760ms, sequenceid=316, compaction requested=false 2024-12-04T15:23:13,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:13,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-04T15:23:13,448 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856
2024-12-04T15:23:13,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113
2024-12-04T15:23:13,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.
2024-12-04T15:23:13,449 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-12-04T15:23:13,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A
2024-12-04T15:23:13,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-04T15:23:13,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B
2024-12-04T15:23:13,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-04T15:23:13,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C
2024-12-04T15:23:13,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-04T15:23:13,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204a7598e859d52400ab3cca7dfde47c8c7_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325792612/Put/seqid=0
2024-12-04T15:23:13,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742318_1494 (size=9914)
2024-12-04T15:23:13,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-04T15:23:13,553 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204a7598e859d52400ab3cca7dfde47c8c7_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204a7598e859d52400ab3cca7dfde47c8c7_96aa8a9c538d7176a93d416eb9d9bfac
2024-12-04T15:23:13,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/22bf0ee74d704d3fbace4d3bfed49234, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac]
2024-12-04T15:23:13,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/22bf0ee74d704d3fbace4d3bfed49234 is 175, key is test_row_0/A:col10/1733325792612/Put/seqid=0
2024-12-04T15:23:13,600 INFO [Block report
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742319_1495 (size=22561) 2024-12-04T15:23:13,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,612 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=331, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/22bf0ee74d704d3fbace4d3bfed49234 2024-12-04T15:23:13,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/c3050df0f471407ab34b6b39994c72b7 is 50, key is test_row_0/B:col10/1733325792612/Put/seqid=0 2024-12-04T15:23:13,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-04T15:23:13,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-04T15:23:13,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-04T15:23:13,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-04T15:23:13,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-04T15:23:13,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-04T15:23:13,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-04T15:23:13,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-04T15:23:13,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742320_1496 (size=9857) 2024-12-04T15:23:13,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-04T15:23:13,736 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/c3050df0f471407ab34b6b39994c72b7
2024-12-04T15:23:13,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/024351512e8e4c6b901adc1cfb61d4b2 is 50, key is test_row_0/C:col10/1733325792612/Put/seqid=0
2024-12-04T15:23:13,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing
2024-12-04T15:23:13,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac
2024-12-04T15:23:13,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742321_1497 (size=9857)
2024-12-04T15:23:13,880 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/024351512e8e4c6b901adc1cfb61d4b2
2024-12-04T15:23:13,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/22bf0ee74d704d3fbace4d3bfed49234 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/22bf0ee74d704d3fbace4d3bfed49234 2024-12-04T15:23:13,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,940 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/22bf0ee74d704d3fbace4d3bfed49234, entries=100, sequenceid=331, filesize=22.0 K 2024-12-04T15:23:13,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-04T15:23:13,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-04T15:23:13,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/c3050df0f471407ab34b6b39994c72b7 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/c3050df0f471407ab34b6b39994c72b7 2024-12-04T15:23:13,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,959 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/c3050df0f471407ab34b6b39994c72b7, entries=100, sequenceid=331, filesize=9.6 K 2024-12-04T15:23:13,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,961 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/024351512e8e4c6b901adc1cfb61d4b2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/024351512e8e4c6b901adc1cfb61d4b2 2024-12-04T15:23:13,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-04T15:23:13,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,970 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/024351512e8e4c6b901adc1cfb61d4b2, entries=100, sequenceid=331, filesize=9.6 K 2024-12-04T15:23:13,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,971 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=13.42 KB/13740 for 96aa8a9c538d7176a93d416eb9d9bfac in 522ms, sequenceid=331, compaction requested=true 2024-12-04T15:23:13,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:13,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:13,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-12-04T15:23:13,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-12-04T15:23:13,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,974 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-04T15:23:13,974 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.6990 sec 2024-12-04T15:23:13,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,976 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 3.7030 sec 2024-12-04T15:23:13,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:13,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,253 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-04T15:23:14,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:14,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:14,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:14,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:14,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:14,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:14,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:14,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,278 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204d2996ab1c61345f5bb9b740f11d18b7b_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325794147/Put/seqid=0 2024-12-04T15:23:14,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742322_1498 (size=17534) 2024-12-04T15:23:14,315 DEBUG [MemStoreFlusher.0 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,320 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204d2996ab1c61345f5bb9b740f11d18b7b_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204d2996ab1c61345f5bb9b740f11d18b7b_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:14,322 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/1f9edfad4e314323b3e799f77b6b9281, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:14,323 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/1f9edfad4e314323b3e799f77b6b9281 is 175, key is test_row_0/A:col10/1733325794147/Put/seqid=0 2024-12-04T15:23:14,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742323_1499 (size=48639) 2024-12-04T15:23:14,363 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=342, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/1f9edfad4e314323b3e799f77b6b9281 2024-12-04T15:23:14,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/7ee5a574074442d3a5bc27ec9c7fd69b is 50, key is test_row_0/B:col10/1733325794147/Put/seqid=0 2024-12-04T15:23:14,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-04T15:23:14,393 INFO [Thread-1809 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-12-04T15:23:14,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:23:14,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-12-04T15:23:14,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-04T15:23:14,419 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_PREPARE 2024-12-04T15:23:14,419 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:23:14,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:23:14,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742324_1500 (size=12301) 2024-12-04T15:23:14,441 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/7ee5a574074442d3a5bc27ec9c7fd69b 2024-12-04T15:23:14,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/227dd4298e024f6297f893d815895da8 is 50, key is test_row_0/C:col10/1733325794147/Put/seqid=0 2024-12-04T15:23:14,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742325_1501 (size=12301) 2024-12-04T15:23:14,498 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/227dd4298e024f6297f893d815895da8 2024-12-04T15:23:14,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/1f9edfad4e314323b3e799f77b6b9281 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/1f9edfad4e314323b3e799f77b6b9281 2024-12-04T15:23:14,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-04T15:23:14,526 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/1f9edfad4e314323b3e799f77b6b9281, entries=250, sequenceid=342, filesize=47.5 K 2024-12-04T15:23:14,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/7ee5a574074442d3a5bc27ec9c7fd69b as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7ee5a574074442d3a5bc27ec9c7fd69b 2024-12-04T15:23:14,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7ee5a574074442d3a5bc27ec9c7fd69b, entries=150, sequenceid=342, filesize=12.0 K 2024-12-04T15:23:14,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/227dd4298e024f6297f893d815895da8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/227dd4298e024f6297f893d815895da8 2024-12-04T15:23:14,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/227dd4298e024f6297f893d815895da8, entries=150, sequenceid=342, filesize=12.0 K 2024-12-04T15:23:14,539 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=127.47 KB/130530 for 96aa8a9c538d7176a93d416eb9d9bfac in 286ms, sequenceid=342, compaction requested=true 2024-12-04T15:23:14,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:14,539 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:14,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:23:14,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:14,540 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:14,541 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143086 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:23:14,541 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/A is initiating minor compaction (all files) 2024-12-04T15:23:14,541 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/A in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
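The HRegionFileSystem(442) "Committing ... .tmp/..." and HStore$StoreFlusherImpl(1989) "Added ..." entries above (and the HMobStore(268) rename from mobdir/.tmp to mobdir/data) all follow the same write-to-temporary-then-commit discipline: the flushed file is produced under a .tmp directory and only moved into the live family directory once it is complete. A minimal sketch of that pattern using plain java.nio, with hypothetical method names rather than the actual HBase classes:

    import java.io.IOException;
    import java.nio.file.*;

    // Hypothetical illustration of the "flush to .tmp, then commit" pattern
    // visible in the HRegionFileSystem(442) / HStore(1989) entries above.
    public class TmpThenCommitSketch {

        // Write the flushed data under the region's .tmp directory first, so a
        // crash mid-flush never leaves a partial file in the live store directory.
        static Path writeTempFile(Path regionDir, String family, byte[] data) throws IOException {
            Path tmpDir = regionDir.resolve(".tmp").resolve(family);
            Files.createDirectories(tmpDir);
            Path tmpFile = tmpDir.resolve("flush-" + System.nanoTime());
            Files.write(tmpFile, data);
            return tmpFile;
        }

        // Commit = atomically move the finished file into the family directory;
        // only after the move does the store start serving reads from it.
        static Path commit(Path regionDir, String family, Path tmpFile) throws IOException {
            Path familyDir = regionDir.resolve(family);
            Files.createDirectories(familyDir);
            Path target = familyDir.resolve(tmpFile.getFileName());
            return Files.move(tmpFile, target, StandardCopyOption.ATOMIC_MOVE);
        }

        public static void main(String[] args) throws IOException {
            Path regionDir = Files.createTempDirectory("region");
            Path tmp = writeTempFile(regionDir, "A", "row data".getBytes());
            System.out.println("Committed " + commit(regionDir, "A", tmp));
        }
    }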
2024-12-04T15:23:14,541 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e473ab8328f3453bb20e3f9e8b8cbbfd, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/93b89fc6c1144e198b03aed9d324d5ce, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/22bf0ee74d704d3fbace4d3bfed49234, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/1f9edfad4e314323b3e799f77b6b9281] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=139.7 K 2024-12-04T15:23:14,541 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:14,541 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e473ab8328f3453bb20e3f9e8b8cbbfd, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/93b89fc6c1144e198b03aed9d324d5ce, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/22bf0ee74d704d3fbace4d3bfed49234, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/1f9edfad4e314323b3e799f77b6b9281] 2024-12-04T15:23:14,542 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e473ab8328f3453bb20e3f9e8b8cbbfd, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733325790379 2024-12-04T15:23:14,542 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47442 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:23:14,542 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/B is initiating minor compaction (all files) 2024-12-04T15:23:14,542 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/B in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:14,542 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1fc7133e5bd745ab9316927e09117c9d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/6f91e5dfc9a84854941787a1cdc871a7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/c3050df0f471407ab34b6b39994c72b7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7ee5a574074442d3a5bc27ec9c7fd69b] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=46.3 K 2024-12-04T15:23:14,542 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93b89fc6c1144e198b03aed9d324d5ce, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733325791069 2024-12-04T15:23:14,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:23:14,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:14,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:23:14,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:14,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:14,544 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1fc7133e5bd745ab9316927e09117c9d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733325790379 2024-12-04T15:23:14,544 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22bf0ee74d704d3fbace4d3bfed49234, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733325792565 2024-12-04T15:23:14,544 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f91e5dfc9a84854941787a1cdc871a7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733325791069 2024-12-04T15:23:14,545 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting c3050df0f471407ab34b6b39994c72b7, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733325792565 2024-12-04T15:23:14,545 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f9edfad4e314323b3e799f77b6b9281, 
keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733325794074 2024-12-04T15:23:14,545 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ee5a574074442d3a5bc27ec9c7fd69b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733325794147 2024-12-04T15:23:14,546 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-04T15:23:14,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:14,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:14,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:14,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:14,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:14,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:14,557 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:14,576 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:14,580 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-04T15:23:14,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:14,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:14,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:14,581 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:14,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:14,582 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#B#compaction#427 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:14,583 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/1f1c140383fb43e0b60d6a40115f9bb7 is 50, key is test_row_0/B:col10/1733325794147/Put/seqid=0 2024-12-04T15:23:14,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:14,593 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204434276281e8b42319c47c6986466327d_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325794525/Put/seqid=0 2024-12-04T15:23:14,604 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241204995aabefecbb4dcfa626020acbe94740_96aa8a9c538d7176a93d416eb9d9bfac store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:14,606 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241204995aabefecbb4dcfa626020acbe94740_96aa8a9c538d7176a93d416eb9d9bfac, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:14,607 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204995aabefecbb4dcfa626020acbe94740_96aa8a9c538d7176a93d416eb9d9bfac because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:14,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742328_1504 (size=14994) 2024-12-04T15:23:14,645 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:14,650 INFO [MemStoreFlusher.0 {}] 
regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204434276281e8b42319c47c6986466327d_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204434276281e8b42319c47c6986466327d_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:14,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742326_1502 (size=13119) 2024-12-04T15:23:14,657 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/f7d7712ab2564dc0a56feb6fc369e534, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:14,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/f7d7712ab2564dc0a56feb6fc369e534 is 175, key is test_row_0/A:col10/1733325794525/Put/seqid=0 2024-12-04T15:23:14,667 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/1f1c140383fb43e0b60d6a40115f9bb7 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1f1c140383fb43e0b60d6a40115f9bb7 2024-12-04T15:23:14,672 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/B of 96aa8a9c538d7176a93d416eb9d9bfac into 1f1c140383fb43e0b60d6a40115f9bb7(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
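The DefaultMobStoreFlusher / DefaultMobStoreCompactor entries above show the MOB side of the same table: values above a size threshold are written into a separate file under the mob directory while the regular store file keeps only a small reference, and when a flush or compaction produces no such oversized cells the mob writer is simply aborted ("because there are no MOB cells"). A rough sketch of that threshold split, with hypothetical names and print statements standing in for the real writers:

    import java.util.*;

    // Hypothetical sketch of the MOB split: large values go to a separate "mob"
    // file, small values stay inline; an empty mob writer is discarded, which is
    // the "Aborting writer ... because there are no MOB cells" case above.
    public class MobFlushSketch {

        record Cell(String row, byte[] value) {}

        static void flush(List<Cell> cells, long mobThreshold) {
            List<Cell> mobCells = new ArrayList<>();
            List<Cell> storeCells = new ArrayList<>();
            for (Cell c : cells) {
                if (c.value().length > mobThreshold) {
                    mobCells.add(c);                                  // large value -> mob file
                    storeCells.add(new Cell(c.row(), new byte[0]));   // stand-in for the reference cell
                } else {
                    storeCells.add(c);                                // small value stays inline
                }
            }
            System.out.println("store file cells: " + storeCells.size());
            if (mobCells.isEmpty()) {
                System.out.println("no MOB cells -> abort mob writer");
            } else {
                System.out.println("mob file cells: " + mobCells.size());
            }
        }

        public static void main(String[] args) {
            flush(List.of(new Cell("r1", new byte[10]), new Cell("r2", new byte[500])), 100);
        }
    }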
2024-12-04T15:23:14,672 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:14,672 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/B, priority=12, startTime=1733325794540; duration=0sec 2024-12-04T15:23:14,672 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:14,672 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:B 2024-12-04T15:23:14,673 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:14,674 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47442 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:23:14,674 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/C is initiating minor compaction (all files) 2024-12-04T15:23:14,674 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/C in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:14,674 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/577748ea24f04ab3a06fddb2166ba257, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/985ebaa99f8b4f64bb7f46c8114e1e36, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/024351512e8e4c6b901adc1cfb61d4b2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/227dd4298e024f6297f893d815895da8] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=46.3 K 2024-12-04T15:23:14,674 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 577748ea24f04ab3a06fddb2166ba257, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733325790379 2024-12-04T15:23:14,675 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 985ebaa99f8b4f64bb7f46c8114e1e36, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733325791069 2024-12-04T15:23:14,675 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 024351512e8e4c6b901adc1cfb61d4b2, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, 
compression=NONE, seqNum=331, earliestPutTs=1733325792565 2024-12-04T15:23:14,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742327_1503 (size=4469) 2024-12-04T15:23:14,676 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 227dd4298e024f6297f893d815895da8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733325794147 2024-12-04T15:23:14,677 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#A#compaction#426 average throughput is 0.20 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:14,677 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/e148155a99a245a8813c5441e578d245 is 175, key is test_row_0/A:col10/1733325794147/Put/seqid=0 2024-12-04T15:23:14,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742329_1505 (size=39949) 2024-12-04T15:23:14,701 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=365, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/f7d7712ab2564dc0a56feb6fc369e534 2024-12-04T15:23:14,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742330_1506 (size=32073) 2024-12-04T15:23:14,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:14,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325854698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:14,719 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#C#compaction#429 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:14,719 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/e28886c7ae04451e80400a8c4c719f5a is 50, key is test_row_0/C:col10/1733325794147/Put/seqid=0 2024-12-04T15:23:14,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-04T15:23:14,727 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/e148155a99a245a8813c5441e578d245 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e148155a99a245a8813c5441e578d245 2024-12-04T15:23:14,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/de866ae3808e474a811ad53274e27401 is 50, key is test_row_0/B:col10/1733325794525/Put/seqid=0 2024-12-04T15:23:14,734 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/A of 96aa8a9c538d7176a93d416eb9d9bfac into e148155a99a245a8813c5441e578d245(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
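The RegionTooBusyException WARNs above come from the resource check in HRegion.checkResources (visible in the stack trace): once the region's pending memstore data exceeds its blocking limit (512 K in this test configuration), new writes are rejected so flushes can drain the backlog, and the client is expected to back off and retry. A trimmed, hypothetical sketch of such a guard:

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicLong;

    // Hypothetical sketch of the memstore back-pressure guard whose effect shows
    // up above as RegionTooBusyException "Over memstore limit=512.0 K": if the
    // region's pending in-memory data exceeds the blocking limit, reject the
    // write so the client backs off while flushes shrink the memstore.
    public class MemstorePressureSketch {

        static class RegionTooBusyException extends IOException {
            RegionTooBusyException(String msg) { super(msg); }
        }

        private final AtomicLong memstoreDataSize = new AtomicLong();
        private final long blockingMemstoreSize;

        MemstorePressureSketch(long blockingMemstoreSize) {
            this.blockingMemstoreSize = blockingMemstoreSize;
        }

        void put(byte[] value) throws RegionTooBusyException {
            if (memstoreDataSize.get() > blockingMemstoreSize) {
                throw new RegionTooBusyException("Over memstore limit=" + blockingMemstoreSize + " bytes");
            }
            memstoreDataSize.addAndGet(value.length);   // accepted write grows the memstore
        }

        void flushed(long bytes) {                       // a completed flush shrinks it again
            memstoreDataSize.addAndGet(-bytes);
        }

        public static void main(String[] args) throws Exception {
            MemstorePressureSketch region = new MemstorePressureSketch(512 * 1024);
            for (int i = 0; i < 129; i++) region.put(new byte[4096]);   // fill past the limit
            try {
                region.put(new byte[4096]);
            } catch (RegionTooBusyException e) {
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }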
2024-12-04T15:23:14,734 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:14,734 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/A, priority=12, startTime=1733325794539; duration=0sec 2024-12-04T15:23:14,734 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:14,734 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:A 2024-12-04T15:23:14,736 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:14,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-04T15:23:14,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:14,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:14,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:14,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
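Each repetition of "NOT flushing ... as already flushing" followed by "Unable to complete flush" and the master's "Remote procedure failed, pid=115" is the master-driven FlushRegionProcedure colliding with the flush the MemStoreFlusher already started on the region server: the remote callable refuses to start a second flush, reports failure, and the master re-dispatches it until the region is free. A compact, hypothetical single-process sketch of that retry interplay (no RPC, simplified names):

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;

    // Hypothetical sketch of the flush-procedure retry seen above: the dispatched
    // flush fails while the region is already flushing, and the coordinator
    // simply re-dispatches it until it succeeds.
    public class FlushProcedureRetrySketch {

        static class Region {
            final AtomicBoolean flushing = new AtomicBoolean(false);

            // Refuse to start a second flush while one is in progress.
            void flushOnce() throws IOException {
                if (!flushing.compareAndSet(false, true)) {
                    throw new IOException("Unable to complete flush: already flushing");
                }
                try {
                    Thread.sleep(50);                 // pretend to write the HFiles
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                } finally {
                    flushing.set(false);
                }
            }
        }

        public static void main(String[] args) throws Exception {
            Region region = new Region();

            // A background flush already running, like MemStoreFlusher.0 above.
            Thread background = new Thread(() -> {
                try { region.flushOnce(); } catch (IOException ignored) {}
            });
            background.start();
            Thread.sleep(5);                          // let the background flush start first

            // The "procedure" side: retry the dispatched flush until it goes through.
            for (int attempt = 1; ; attempt++) {
                try {
                    region.flushOnce();
                    System.out.println("flush procedure succeeded on attempt " + attempt);
                    break;
                } catch (IOException e) {
                    System.out.println("attempt " + attempt + " failed: " + e.getMessage());
                    Thread.sleep(20);                 // back off before re-dispatching
                }
            }
            background.join();
        }
    }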
2024-12-04T15:23:14,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:14,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:14,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742332_1508 (size=12301) 2024-12-04T15:23:14,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742331_1507 (size=13119) 2024-12-04T15:23:14,775 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/e28886c7ae04451e80400a8c4c719f5a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/e28886c7ae04451e80400a8c4c719f5a 2024-12-04T15:23:14,783 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/C of 96aa8a9c538d7176a93d416eb9d9bfac into e28886c7ae04451e80400a8c4c719f5a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:14,783 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:14,783 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/C, priority=12, startTime=1733325794543; duration=0sec 2024-12-04T15:23:14,784 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:14,784 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:C 2024-12-04T15:23:14,821 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:14,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325854817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:14,899 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:14,900 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-04T15:23:14,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:14,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:14,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:14,900 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
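The earlier throttle.PressureAwareThroughputController(145) entries ("average throughput is ... slept 0 time(s) ... total limit is 50.00 MB/second") reflect compaction I/O throttling: the compactor periodically compares how much it has written against a configured rate and sleeps when it is running ahead; in this run the compactions were small enough that no sleeping was needed. A rough, hypothetical rate-limiter sketch in that spirit:

    // Hypothetical sketch of write-rate throttling: track bytes written since
    // the start and sleep whenever the observed rate would exceed the limit.
    public class ThroughputThrottleSketch {

        private final double maxBytesPerSecond;
        private final long startNanos = System.nanoTime();
        private long bytesWritten = 0;

        ThroughputThrottleSketch(double maxBytesPerSecond) {
            this.maxBytesPerSecond = maxBytesPerSecond;
        }

        // Called after each chunk of compaction output is written.
        void control(long justWritten) throws InterruptedException {
            bytesWritten += justWritten;
            double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
            double earliestAllowed = bytesWritten / maxBytesPerSecond;  // seconds by which this much output is allowed
            double aheadBySeconds = earliestAllowed - elapsedSeconds;
            if (aheadBySeconds > 0) {
                Thread.sleep((long) (aheadBySeconds * 1000));           // fall back to the target rate
            }
        }

        public static void main(String[] args) throws InterruptedException {
            ThroughputThrottleSketch throttle = new ThroughputThrottleSketch(50 * 1024 * 1024); // ~50 MB/s
            for (int i = 0; i < 10; i++) {
                throttle.control(16 * 1024 * 1024);   // pretend we wrote a 16 MB block of output
            }
            System.out.println("done");
        }
    }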
2024-12-04T15:23:14,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:14,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:15,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-04T15:23:15,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:15,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325855022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,056 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,056 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-04T15:23:15,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:15,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:15,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
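
[Editor's note] The "Over memstore limit=512.0 K" figure in these warnings is the region's blocking memstore size. In HBase that threshold is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the small 512 K value here suggests the test lowers the flush size far below the production default to provoke blocking quickly. A hedged sketch of reading that threshold from the active configuration (the key names and defaults are the standard HBase ones; the comment about this test's setup is an assumption):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush trigger (production default 128 MB; this test evidently
    // uses a much smaller value, yielding the 512 K blocking limit in the log).
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore
    // grows past flushSize * multiplier.
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    long blockingSize = flushSize * multiplier;
    System.out.printf("Region writes block above %d bytes (%.1f K)%n",
        blockingSize, blockingSize / 1024.0);
  }
}
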
2024-12-04T15:23:15,057 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:15,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:15,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:15,164 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/de866ae3808e474a811ad53274e27401 2024-12-04T15:23:15,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/714a9eccf26f407899f33fd3ea9c11e5 is 50, key is test_row_0/C:col10/1733325794525/Put/seqid=0 2024-12-04T15:23:15,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742333_1509 (size=12301) 2024-12-04T15:23:15,204 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/714a9eccf26f407899f33fd3ea9c11e5 2024-12-04T15:23:15,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/f7d7712ab2564dc0a56feb6fc369e534 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/f7d7712ab2564dc0a56feb6fc369e534 2024-12-04T15:23:15,215 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,215 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-04T15:23:15,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:15,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:15,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:15,216 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:15,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:15,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
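
[Editor's note] The pid=114/115 procedures being dispatched and re-dispatched above are the server side of an administrative flush: the master keeps polling ("Checking to see if procedure is done") and re-sends the FlushRegionCallable until the region is no longer mid-flush. On the client side such a flush is typically requested through the Admin API; a minimal sketch, under the assumption that the test or an operator triggers it roughly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master runs a
      // flush procedure and dispatches FlushRegionCallable to the region servers,
      // which is the pid=115 activity visible in the surrounding log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
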
2024-12-04T15:23:15,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/f7d7712ab2564dc0a56feb6fc369e534, entries=200, sequenceid=365, filesize=39.0 K 2024-12-04T15:23:15,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/de866ae3808e474a811ad53274e27401 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/de866ae3808e474a811ad53274e27401 2024-12-04T15:23:15,235 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/de866ae3808e474a811ad53274e27401, entries=150, sequenceid=365, filesize=12.0 K 2024-12-04T15:23:15,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/714a9eccf26f407899f33fd3ea9c11e5 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/714a9eccf26f407899f33fd3ea9c11e5 2024-12-04T15:23:15,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/714a9eccf26f407899f33fd3ea9c11e5, entries=150, sequenceid=365, filesize=12.0 K 2024-12-04T15:23:15,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 96aa8a9c538d7176a93d416eb9d9bfac in 697ms, sequenceid=365, compaction requested=false 2024-12-04T15:23:15,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:15,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:15,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-04T15:23:15,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:15,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:15,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:15,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:15,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:15,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-12-04T15:23:15,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204aa54acca9fc545019f0413e64578a4a8_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325794695/Put/seqid=0 2024-12-04T15:23:15,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742334_1510 (size=14994) 2024-12-04T15:23:15,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:15,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325855357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,371 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-04T15:23:15,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:15,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
as already flushing 2024-12-04T15:23:15,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:15,372 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:15,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:15,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:15,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:15,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325855366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:15,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325855367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:15,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325855367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:15,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325855474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:15,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325855477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:15,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325855485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:15,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325855485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-04T15:23:15,536 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-04T15:23:15,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:15,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:15,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:15,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
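
[Editor's note] Each re-dispatch above is refused with "NOT flushing ... as already flushing" followed by "Unable to complete flush": the region will not start a second flush while one is in progress, so the remote procedure fails and is retried later. A simplified, hypothetical illustration of that kind of single-flight guard follows; this is not HBase's actual implementation (HRegion tracks flushing in the region's write state), it only shows the shape of the check.

import java.util.concurrent.atomic.AtomicBoolean;

public class SingleFlightFlushGuard {
  private final AtomicBoolean flushing = new AtomicBoolean(false);

  /** Returns true if this call performed the flush, false if one was already running. */
  public boolean flushIfIdle(Runnable doFlush) {
    if (!flushing.compareAndSet(false, true)) {
      // Mirrors the log: the concurrent request is rejected and the caller
      // (the remote flush procedure) reports the failure and retries later.
      return false;
    }
    try {
      doFlush.run();
      return true;
    } finally {
      flushing.set(false);
    }
  }
}
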
2024-12-04T15:23:15,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:15,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:15,692 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-04T15:23:15,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:15,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:15,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:15,697 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:15,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:15,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:15,701 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:15,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325855688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:15,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325855690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:15,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325855691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:15,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325855704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:15,732 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:15,737 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204aa54acca9fc545019f0413e64578a4a8_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204aa54acca9fc545019f0413e64578a4a8_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:15,738 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/3976964b82554bb68185a54432adbf00, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:15,738 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/3976964b82554bb68185a54432adbf00 is 175, key is test_row_0/A:col10/1733325794695/Put/seqid=0 2024-12-04T15:23:15,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742335_1511 (size=39949) 2024-12-04T15:23:15,801 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=382, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/3976964b82554bb68185a54432adbf00 2024-12-04T15:23:15,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/8b4abb7e85d449d7a3bdc71afd59cec1 is 50, key is test_row_0/B:col10/1733325794695/Put/seqid=0 2024-12-04T15:23:15,860 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 
2024-12-04T15:23:15,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-04T15:23:15,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:15,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:15,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:15,861 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:15,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:15,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:15,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742336_1512 (size=12301) 2024-12-04T15:23:15,896 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/8b4abb7e85d449d7a3bdc71afd59cec1 2024-12-04T15:23:15,941 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/24777dc42bf147a2a817e22220285afb is 50, key is test_row_0/C:col10/1733325794695/Put/seqid=0 2024-12-04T15:23:15,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742337_1513 (size=12301) 2024-12-04T15:23:15,959 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/24777dc42bf147a2a817e22220285afb 2024-12-04T15:23:15,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/3976964b82554bb68185a54432adbf00 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/3976964b82554bb68185a54432adbf00 2024-12-04T15:23:16,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:16,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325856003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:16,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325856005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,016 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-04T15:23:16,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:16,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:16,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:16,017 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:16,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:16,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:16,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:16,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325856015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,031 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/3976964b82554bb68185a54432adbf00, entries=200, sequenceid=382, filesize=39.0 K 2024-12-04T15:23:16,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:16,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325856025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/8b4abb7e85d449d7a3bdc71afd59cec1 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/8b4abb7e85d449d7a3bdc71afd59cec1 2024-12-04T15:23:16,085 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/8b4abb7e85d449d7a3bdc71afd59cec1, entries=150, sequenceid=382, filesize=12.0 K 2024-12-04T15:23:16,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/24777dc42bf147a2a817e22220285afb as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/24777dc42bf147a2a817e22220285afb 2024-12-04T15:23:16,092 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/24777dc42bf147a2a817e22220285afb, entries=150, sequenceid=382, filesize=12.0 K 2024-12-04T15:23:16,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 96aa8a9c538d7176a93d416eb9d9bfac in 831ms, sequenceid=382, compaction requested=true 2024-12-04T15:23:16,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:16,100 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:16,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:23:16,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:16,100 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:16,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:23:16,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:16,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:23:16,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:16,112 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111971 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:16,112 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/A is initiating minor compaction (all files) 2024-12-04T15:23:16,112 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/A in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:16,113 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e148155a99a245a8813c5441e578d245, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/f7d7712ab2564dc0a56feb6fc369e534, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/3976964b82554bb68185a54432adbf00] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=109.3 K 2024-12-04T15:23:16,113 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:16,113 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e148155a99a245a8813c5441e578d245, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/f7d7712ab2564dc0a56feb6fc369e534, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/3976964b82554bb68185a54432adbf00] 2024-12-04T15:23:16,118 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e148155a99a245a8813c5441e578d245, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733325794147 2024-12-04T15:23:16,120 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:16,120 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/B is initiating minor compaction (all files) 2024-12-04T15:23:16,120 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/B in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:16,120 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1f1c140383fb43e0b60d6a40115f9bb7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/de866ae3808e474a811ad53274e27401, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/8b4abb7e85d449d7a3bdc71afd59cec1] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=36.8 K 2024-12-04T15:23:16,121 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7d7712ab2564dc0a56feb6fc369e534, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1733325794485 2024-12-04T15:23:16,123 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3976964b82554bb68185a54432adbf00, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1733325794639 2024-12-04T15:23:16,124 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f1c140383fb43e0b60d6a40115f9bb7, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733325794147 2024-12-04T15:23:16,128 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting de866ae3808e474a811ad53274e27401, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1733325794485 2024-12-04T15:23:16,131 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 
8b4abb7e85d449d7a3bdc71afd59cec1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1733325794682 2024-12-04T15:23:16,135 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:16,151 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241204c5a21cec9aa544c1a81a466d13f738cd_96aa8a9c538d7176a93d416eb9d9bfac store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:16,153 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241204c5a21cec9aa544c1a81a466d13f738cd_96aa8a9c538d7176a93d416eb9d9bfac, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:16,154 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204c5a21cec9aa544c1a81a466d13f738cd_96aa8a9c538d7176a93d416eb9d9bfac because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:16,170 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-04T15:23:16,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:16,171 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-04T15:23:16,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:16,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:16,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:16,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:16,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:16,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:16,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742338_1514 (size=4469) 2024-12-04T15:23:16,187 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#B#compaction#436 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:16,187 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/549db563a45645d6a75ae2ae5e9c11d3 is 50, key is test_row_0/B:col10/1733325794695/Put/seqid=0 2024-12-04T15:23:16,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204cb13ef43713d48f586b8d93f0599a0cf_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325795366/Put/seqid=0 2024-12-04T15:23:16,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742339_1515 (size=13221) 2024-12-04T15:23:16,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742340_1516 (size=12454) 2024-12-04T15:23:16,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:16,290 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204cb13ef43713d48f586b8d93f0599a0cf_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204cb13ef43713d48f586b8d93f0599a0cf_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:16,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/03f8ce30b0204e67ad3142de6bf3ba8d, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:16,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/03f8ce30b0204e67ad3142de6bf3ba8d is 175, key is test_row_0/A:col10/1733325795366/Put/seqid=0 2024-12-04T15:23:16,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742341_1517 (size=31255) 2024-12-04T15:23:16,356 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=405, memsize=44.7 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/03f8ce30b0204e67ad3142de6bf3ba8d 2024-12-04T15:23:16,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/824c4a30ba3b4198b1493f3c55c4e7f3 is 50, key is test_row_0/B:col10/1733325795366/Put/seqid=0 2024-12-04T15:23:16,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742342_1518 (size=12301) 2024-12-04T15:23:16,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. as already flushing 2024-12-04T15:23:16,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:16,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-04T15:23:16,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:16,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325856544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:16,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325856545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:16,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325856547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:16,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325856547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,596 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#A#compaction#435 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:16,597 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/ca1f4b69bc364adfbec6a86084ea2fba is 175, key is test_row_0/A:col10/1733325794695/Put/seqid=0 2024-12-04T15:23:16,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742343_1519 (size=32175) 2024-12-04T15:23:16,655 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/549db563a45645d6a75ae2ae5e9c11d3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/549db563a45645d6a75ae2ae5e9c11d3 2024-12-04T15:23:16,660 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/B of 96aa8a9c538d7176a93d416eb9d9bfac into 549db563a45645d6a75ae2ae5e9c11d3(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:16,660 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:16,660 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/B, priority=13, startTime=1733325796100; duration=0sec 2024-12-04T15:23:16,661 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:16,661 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:B 2024-12-04T15:23:16,661 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:16,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:16,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325856659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:16,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325856659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:16,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325856661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,673 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:16,673 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/C is initiating minor compaction (all files) 2024-12-04T15:23:16,673 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/C in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:16,673 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/e28886c7ae04451e80400a8c4c719f5a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/714a9eccf26f407899f33fd3ea9c11e5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/24777dc42bf147a2a817e22220285afb] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=36.8 K 2024-12-04T15:23:16,675 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting e28886c7ae04451e80400a8c4c719f5a, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733325794147 2024-12-04T15:23:16,680 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 714a9eccf26f407899f33fd3ea9c11e5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1733325794485 2024-12-04T15:23:16,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:16,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325856670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,683 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 24777dc42bf147a2a817e22220285afb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1733325794682 2024-12-04T15:23:16,731 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#C#compaction#439 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:16,731 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/6a57eda5be4f432b9a9653a7453dcfd1 is 50, key is test_row_0/C:col10/1733325794695/Put/seqid=0 2024-12-04T15:23:16,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742344_1520 (size=13221) 2024-12-04T15:23:16,771 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/6a57eda5be4f432b9a9653a7453dcfd1 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/6a57eda5be4f432b9a9653a7453dcfd1 2024-12-04T15:23:16,777 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/C of 96aa8a9c538d7176a93d416eb9d9bfac into 6a57eda5be4f432b9a9653a7453dcfd1(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:23:16,777 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:16,777 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/C, priority=13, startTime=1733325796101; duration=0sec 2024-12-04T15:23:16,777 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:16,777 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:C 2024-12-04T15:23:16,800 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/824c4a30ba3b4198b1493f3c55c4e7f3 2024-12-04T15:23:16,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/a0b4be1a6d4e4cafaa2fd44fa5bbe103 is 50, key is test_row_0/C:col10/1733325795366/Put/seqid=0 2024-12-04T15:23:16,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742345_1521 (size=12301) 2024-12-04T15:23:16,858 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/a0b4be1a6d4e4cafaa2fd44fa5bbe103 2024-12-04T15:23:16,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/03f8ce30b0204e67ad3142de6bf3ba8d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/03f8ce30b0204e67ad3142de6bf3ba8d 2024-12-04T15:23:16,871 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/03f8ce30b0204e67ad3142de6bf3ba8d, entries=150, sequenceid=405, filesize=30.5 K 2024-12-04T15:23:16,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:16,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325856871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:16,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325856871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:16,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/824c4a30ba3b4198b1493f3c55c4e7f3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/824c4a30ba3b4198b1493f3c55c4e7f3 2024-12-04T15:23:16,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325856871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:16,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325856883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:16,900 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/824c4a30ba3b4198b1493f3c55c4e7f3, entries=150, sequenceid=405, filesize=12.0 K 2024-12-04T15:23:16,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/a0b4be1a6d4e4cafaa2fd44fa5bbe103 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/a0b4be1a6d4e4cafaa2fd44fa5bbe103 2024-12-04T15:23:16,905 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/a0b4be1a6d4e4cafaa2fd44fa5bbe103, entries=150, sequenceid=405, filesize=12.0 K 2024-12-04T15:23:16,906 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 96aa8a9c538d7176a93d416eb9d9bfac in 735ms, sequenceid=405, compaction requested=false 2024-12-04T15:23:16,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:16,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:16,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-12-04T15:23:16,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-12-04T15:23:16,909 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-12-04T15:23:16,909 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4890 sec 2024-12-04T15:23:16,911 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 2.4940 sec 2024-12-04T15:23:17,053 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/ca1f4b69bc364adfbec6a86084ea2fba as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/ca1f4b69bc364adfbec6a86084ea2fba 2024-12-04T15:23:17,058 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/A of 96aa8a9c538d7176a93d416eb9d9bfac into ca1f4b69bc364adfbec6a86084ea2fba(size=31.4 K), total size for store is 61.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:23:17,059 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:17,059 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/A, priority=13, startTime=1733325796100; duration=0sec 2024-12-04T15:23:17,059 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:17,059 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:A 2024-12-04T15:23:17,184 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-04T15:23:17,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:17,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:17,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:17,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:17,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:17,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:17,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:17,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412044ad9b14b8e214badb49f105122849b24_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325797183/Put/seqid=0 2024-12-04T15:23:17,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742346_1522 (size=14994) 2024-12-04T15:23:17,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:17,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325857287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:17,300 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:17,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325857289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:17,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:17,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325857295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:17,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:17,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325857296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:17,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:17,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46862 deadline: 1733325857381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:17,393 DEBUG [Thread-1801 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18261 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., hostname=645c2dbfef2e,42169,1733325683856, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:23:17,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:17,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325857401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:17,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:17,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325857401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:17,409 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:17,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325857404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:17,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:17,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325857404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:17,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:17,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325857608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:17,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:17,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325857609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:17,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:17,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325857616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:17,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:17,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325857617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:17,628 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:17,652 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412044ad9b14b8e214badb49f105122849b24_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412044ad9b14b8e214badb49f105122849b24_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:17,657 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/dcc78adfa2d34f35af6fd7e43560316c, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:17,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/dcc78adfa2d34f35af6fd7e43560316c is 175, key is test_row_0/A:col10/1733325797183/Put/seqid=0 2024-12-04T15:23:17,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742347_1523 (size=39949) 2024-12-04T15:23:17,824 DEBUG [Thread-1812 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x585edb09 to 127.0.0.1:55739 2024-12-04T15:23:17,824 DEBUG [Thread-1812 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:17,825 DEBUG [Thread-1810 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x299dc25b to 127.0.0.1:55739 2024-12-04T15:23:17,825 DEBUG [Thread-1810 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:17,832 DEBUG [Thread-1814 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x056c82ce to 127.0.0.1:55739 2024-12-04T15:23:17,832 DEBUG [Thread-1816 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e7f0457 to 127.0.0.1:55739 2024-12-04T15:23:17,832 DEBUG [Thread-1814 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:17,832 DEBUG [Thread-1816 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:17,836 DEBUG [Thread-1818 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a7db91c to 127.0.0.1:55739 2024-12-04T15:23:17,836 DEBUG [Thread-1818 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:17,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:17,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:17,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325857918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:17,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325857918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:17,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:17,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325857922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:17,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:17,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325857922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:18,113 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=422, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/dcc78adfa2d34f35af6fd7e43560316c 2024-12-04T15:23:18,120 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/38e8ca939979462c9c5934511c03390c is 50, key is test_row_0/B:col10/1733325797183/Put/seqid=0 2024-12-04T15:23:18,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742348_1524 (size=12301) 2024-12-04T15:23:18,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:18,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46838 deadline: 1733325858420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:18,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:18,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46860 deadline: 1733325858424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:18,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:18,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46824 deadline: 1733325858424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:18,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:18,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46796 deadline: 1733325858425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:18,525 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/38e8ca939979462c9c5934511c03390c 2024-12-04T15:23:18,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/3d754c4c6b7c4114a2990c04b12b3bdf is 50, key is test_row_0/C:col10/1733325797183/Put/seqid=0 2024-12-04T15:23:18,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-04T15:23:18,534 INFO [Thread-1809 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-12-04T15:23:18,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742349_1525 (size=12301) 2024-12-04T15:23:18,935 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/3d754c4c6b7c4114a2990c04b12b3bdf 2024-12-04T15:23:18,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/dcc78adfa2d34f35af6fd7e43560316c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/dcc78adfa2d34f35af6fd7e43560316c 2024-12-04T15:23:18,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/dcc78adfa2d34f35af6fd7e43560316c, entries=200, sequenceid=422, filesize=39.0 K 2024-12-04T15:23:18,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/38e8ca939979462c9c5934511c03390c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/38e8ca939979462c9c5934511c03390c 2024-12-04T15:23:18,948 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/38e8ca939979462c9c5934511c03390c, entries=150, sequenceid=422, filesize=12.0 K 2024-12-04T15:23:18,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/3d754c4c6b7c4114a2990c04b12b3bdf as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3d754c4c6b7c4114a2990c04b12b3bdf 2024-12-04T15:23:18,952 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3d754c4c6b7c4114a2990c04b12b3bdf, entries=150, sequenceid=422, filesize=12.0 K 2024-12-04T15:23:18,953 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 96aa8a9c538d7176a93d416eb9d9bfac in 1769ms, sequenceid=422, compaction requested=true 2024-12-04T15:23:18,953 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:18,953 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:18,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:23:18,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:18,953 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:18,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:23:18,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:18,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96aa8a9c538d7176a93d416eb9d9bfac:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:23:18,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:18,954 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103379 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:18,954 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/A is initiating minor compaction (all files) 2024-12-04T15:23:18,954 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/A in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:18,954 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:18,954 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/B is initiating minor compaction (all files) 2024-12-04T15:23:18,954 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/ca1f4b69bc364adfbec6a86084ea2fba, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/03f8ce30b0204e67ad3142de6bf3ba8d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/dcc78adfa2d34f35af6fd7e43560316c] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=101.0 K 2024-12-04T15:23:18,954 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:18,954 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/B in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:18,954 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/ca1f4b69bc364adfbec6a86084ea2fba, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/03f8ce30b0204e67ad3142de6bf3ba8d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/dcc78adfa2d34f35af6fd7e43560316c] 2024-12-04T15:23:18,954 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/549db563a45645d6a75ae2ae5e9c11d3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/824c4a30ba3b4198b1493f3c55c4e7f3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/38e8ca939979462c9c5934511c03390c] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=36.9 K 2024-12-04T15:23:18,955 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca1f4b69bc364adfbec6a86084ea2fba, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1733325794682 2024-12-04T15:23:18,955 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 549db563a45645d6a75ae2ae5e9c11d3, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1733325794682 2024-12-04T15:23:18,955 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03f8ce30b0204e67ad3142de6bf3ba8d, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733325795363 2024-12-04T15:23:18,955 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 824c4a30ba3b4198b1493f3c55c4e7f3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733325795363 2024-12-04T15:23:18,955 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 38e8ca939979462c9c5934511c03390c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1733325796530 2024-12-04T15:23:18,955 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting dcc78adfa2d34f35af6fd7e43560316c, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1733325796530 2024-12-04T15:23:18,963 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#B#compaction#444 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:18,963 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:18,963 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/124db02c9ba94c238bdf23f0129c1d4e is 50, key is test_row_0/B:col10/1733325797183/Put/seqid=0 2024-12-04T15:23:18,965 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241204fcb1ba919d0f4322a5bc460bc35c8051_96aa8a9c538d7176a93d416eb9d9bfac store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:18,969 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241204fcb1ba919d0f4322a5bc460bc35c8051_96aa8a9c538d7176a93d416eb9d9bfac, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:18,969 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204fcb1ba919d0f4322a5bc460bc35c8051_96aa8a9c538d7176a93d416eb9d9bfac because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:18,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742350_1526 (size=13323) 2024-12-04T15:23:18,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742351_1527 (size=4469) 2024-12-04T15:23:19,375 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#A#compaction#445 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:19,376 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/ed66728c52ee4e71a76171484845c5f8 is 175, key is test_row_0/A:col10/1733325797183/Put/seqid=0 2024-12-04T15:23:19,377 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/124db02c9ba94c238bdf23f0129c1d4e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/124db02c9ba94c238bdf23f0129c1d4e 2024-12-04T15:23:19,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742352_1528 (size=32277) 2024-12-04T15:23:19,382 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/B of 96aa8a9c538d7176a93d416eb9d9bfac into 124db02c9ba94c238bdf23f0129c1d4e(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:19,382 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:19,382 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/B, priority=13, startTime=1733325798953; duration=0sec 2024-12-04T15:23:19,382 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:19,382 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:B 2024-12-04T15:23:19,382 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:19,383 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:19,383 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 96aa8a9c538d7176a93d416eb9d9bfac/C is initiating minor compaction (all files) 2024-12-04T15:23:19,383 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96aa8a9c538d7176a93d416eb9d9bfac/C in TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:19,383 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/6a57eda5be4f432b9a9653a7453dcfd1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/a0b4be1a6d4e4cafaa2fd44fa5bbe103, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3d754c4c6b7c4114a2990c04b12b3bdf] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp, totalSize=36.9 K 2024-12-04T15:23:19,384 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a57eda5be4f432b9a9653a7453dcfd1, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1733325794682 2024-12-04T15:23:19,384 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting a0b4be1a6d4e4cafaa2fd44fa5bbe103, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733325795363 2024-12-04T15:23:19,384 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d754c4c6b7c4114a2990c04b12b3bdf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1733325796530 2024-12-04T15:23:19,392 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96aa8a9c538d7176a93d416eb9d9bfac#C#compaction#446 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:19,392 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/7dd02cb488ed4fb49992ab33657466b6 is 50, key is test_row_0/C:col10/1733325797183/Put/seqid=0 2024-12-04T15:23:19,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742353_1529 (size=13323) 2024-12-04T15:23:19,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:19,429 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-04T15:23:19,430 DEBUG [Thread-1807 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x047ef789 to 127.0.0.1:55739 2024-12-04T15:23:19,430 DEBUG [Thread-1807 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:19,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:19,430 DEBUG [Thread-1803 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x25ec2d5b to 127.0.0.1:55739 2024-12-04T15:23:19,430 DEBUG [Thread-1803 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:19,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:19,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:19,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:19,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:19,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:19,432 DEBUG [Thread-1805 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c4acc48 to 127.0.0.1:55739 2024-12-04T15:23:19,432 DEBUG [Thread-1805 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:19,434 DEBUG [Thread-1799 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0733701c to 127.0.0.1:55739 2024-12-04T15:23:19,434 DEBUG [Thread-1799 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:19,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120492636139d2194506bd5a192be53b5ab0_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_0/A:col10/1733325799428/Put/seqid=0 2024-12-04T15:23:19,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742354_1530 (size=12454) 2024-12-04T15:23:19,786 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/ed66728c52ee4e71a76171484845c5f8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/ed66728c52ee4e71a76171484845c5f8 2024-12-04T15:23:19,790 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/A of 96aa8a9c538d7176a93d416eb9d9bfac into ed66728c52ee4e71a76171484845c5f8(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:19,790 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:19,790 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/A, priority=13, startTime=1733325798953; duration=0sec 2024-12-04T15:23:19,790 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:19,790 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:A 2024-12-04T15:23:19,801 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/7dd02cb488ed4fb49992ab33657466b6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/7dd02cb488ed4fb49992ab33657466b6 2024-12-04T15:23:19,804 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96aa8a9c538d7176a93d416eb9d9bfac/C of 96aa8a9c538d7176a93d416eb9d9bfac into 7dd02cb488ed4fb49992ab33657466b6(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:23:19,805 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:19,805 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac., storeName=96aa8a9c538d7176a93d416eb9d9bfac/C, priority=13, startTime=1733325798953; duration=0sec 2024-12-04T15:23:19,805 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:19,805 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96aa8a9c538d7176a93d416eb9d9bfac:C 2024-12-04T15:23:19,840 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:19,843 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120492636139d2194506bd5a192be53b5ab0_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120492636139d2194506bd5a192be53b5ab0_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:19,844 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/f2b31b9239aa4581ba33f96f341617ef, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:19,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/f2b31b9239aa4581ba33f96f341617ef is 175, key is test_row_0/A:col10/1733325799428/Put/seqid=0 2024-12-04T15:23:19,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742355_1531 (size=31255) 2024-12-04T15:23:20,249 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=447, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/f2b31b9239aa4581ba33f96f341617ef 2024-12-04T15:23:20,255 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/b36268f78f434de4802004aea0f2fd05 is 50, key is test_row_0/B:col10/1733325799428/Put/seqid=0 2024-12-04T15:23:20,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742356_1532 (size=12301) 2024-12-04T15:23:20,658 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/b36268f78f434de4802004aea0f2fd05 2024-12-04T15:23:20,665 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/751a45a895474049aa45c2ed5d8e7167 is 50, key is test_row_0/C:col10/1733325799428/Put/seqid=0 2024-12-04T15:23:20,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742357_1533 (size=12301) 2024-12-04T15:23:21,069 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/751a45a895474049aa45c2ed5d8e7167 2024-12-04T15:23:21,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/f2b31b9239aa4581ba33f96f341617ef as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/f2b31b9239aa4581ba33f96f341617ef 2024-12-04T15:23:21,076 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/f2b31b9239aa4581ba33f96f341617ef, entries=150, sequenceid=447, filesize=30.5 K 2024-12-04T15:23:21,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/b36268f78f434de4802004aea0f2fd05 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/b36268f78f434de4802004aea0f2fd05 2024-12-04T15:23:21,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/b36268f78f434de4802004aea0f2fd05, entries=150, sequenceid=447, filesize=12.0 K 2024-12-04T15:23:21,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/751a45a895474049aa45c2ed5d8e7167 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/751a45a895474049aa45c2ed5d8e7167 2024-12-04T15:23:21,083 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/751a45a895474049aa45c2ed5d8e7167, entries=150, sequenceid=447, filesize=12.0 K 2024-12-04T15:23:21,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=13.42 KB/13740 for 96aa8a9c538d7176a93d416eb9d9bfac in 1654ms, sequenceid=447, compaction requested=false 2024-12-04T15:23:21,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:22,214 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-04T15:23:26,617 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/36817bd8bc2543fdb4557a56a7a58e7d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/3ccf86a2938d463b9899542dbbe7d366, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/c09d8f40849f4609a393de78a9f9b701, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/de1666a8facf4903908756dc23dc87d2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/6144d61a486549ab9b6ae479d4b433c5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/a65aadcbaa6d4ea7b3d4b0c33e4138b0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/7b720c7f5c8a4a6c945ae1a549a84a81, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/da6ee0c75ed043a0815f30c5843aee85, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/92092c1f3af945d088ce7bd732245a83, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/f4f1c95695fa4b68aeecdfcadb0c32bf, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/42ccde258ae54b57a6a347140d5c772c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/0aa30942c1834ff59ad942b9978e164f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/7a282a99985347529f0bc6753d8953e4, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/a2c1bab8a7b24c859960b6a13a55946d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/5640154e81ab4ac5a86aada16243023c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e7f6717c0f5647feb66a1ce1774f9071, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/206c312be65e4d5795b1000d5679474e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/510a7708c2be4068bb400a3978c77f5d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/857704ebb94749008f1043b9d07e8612, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/22c8d0d3be084e2eb979ef3caf88b67f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e473ab8328f3453bb20e3f9e8b8cbbfd, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/93b89fc6c1144e198b03aed9d324d5ce, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/22bf0ee74d704d3fbace4d3bfed49234, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/1f9edfad4e314323b3e799f77b6b9281, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e148155a99a245a8813c5441e578d245, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/f7d7712ab2564dc0a56feb6fc369e534, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/3976964b82554bb68185a54432adbf00, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/ca1f4b69bc364adfbec6a86084ea2fba, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/03f8ce30b0204e67ad3142de6bf3ba8d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/dcc78adfa2d34f35af6fd7e43560316c] to archive 2024-12-04T15:23:26,618 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-04T15:23:26,624 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/36817bd8bc2543fdb4557a56a7a58e7d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/36817bd8bc2543fdb4557a56a7a58e7d 2024-12-04T15:23:26,626 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/3ccf86a2938d463b9899542dbbe7d366 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/3ccf86a2938d463b9899542dbbe7d366 2024-12-04T15:23:26,632 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/c09d8f40849f4609a393de78a9f9b701 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/c09d8f40849f4609a393de78a9f9b701 2024-12-04T15:23:26,634 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/de1666a8facf4903908756dc23dc87d2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/de1666a8facf4903908756dc23dc87d2 2024-12-04T15:23:26,635 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/6144d61a486549ab9b6ae479d4b433c5 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/6144d61a486549ab9b6ae479d4b433c5 2024-12-04T15:23:26,636 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/a65aadcbaa6d4ea7b3d4b0c33e4138b0 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/a65aadcbaa6d4ea7b3d4b0c33e4138b0 2024-12-04T15:23:26,638 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/7b720c7f5c8a4a6c945ae1a549a84a81 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/7b720c7f5c8a4a6c945ae1a549a84a81 2024-12-04T15:23:26,639 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/da6ee0c75ed043a0815f30c5843aee85 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/da6ee0c75ed043a0815f30c5843aee85 2024-12-04T15:23:26,641 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/92092c1f3af945d088ce7bd732245a83 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/92092c1f3af945d088ce7bd732245a83 2024-12-04T15:23:26,642 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/f4f1c95695fa4b68aeecdfcadb0c32bf to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/f4f1c95695fa4b68aeecdfcadb0c32bf 2024-12-04T15:23:26,643 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/42ccde258ae54b57a6a347140d5c772c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/42ccde258ae54b57a6a347140d5c772c 2024-12-04T15:23:26,644 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/0aa30942c1834ff59ad942b9978e164f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/0aa30942c1834ff59ad942b9978e164f 2024-12-04T15:23:26,646 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/7a282a99985347529f0bc6753d8953e4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/7a282a99985347529f0bc6753d8953e4 2024-12-04T15:23:26,647 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/a2c1bab8a7b24c859960b6a13a55946d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/a2c1bab8a7b24c859960b6a13a55946d 2024-12-04T15:23:26,648 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/5640154e81ab4ac5a86aada16243023c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/5640154e81ab4ac5a86aada16243023c 2024-12-04T15:23:26,650 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e7f6717c0f5647feb66a1ce1774f9071 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e7f6717c0f5647feb66a1ce1774f9071 2024-12-04T15:23:26,651 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/206c312be65e4d5795b1000d5679474e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/206c312be65e4d5795b1000d5679474e 2024-12-04T15:23:26,656 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/510a7708c2be4068bb400a3978c77f5d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/510a7708c2be4068bb400a3978c77f5d 2024-12-04T15:23:26,658 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/857704ebb94749008f1043b9d07e8612 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/857704ebb94749008f1043b9d07e8612 2024-12-04T15:23:26,664 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/22c8d0d3be084e2eb979ef3caf88b67f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/22c8d0d3be084e2eb979ef3caf88b67f 2024-12-04T15:23:26,666 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e473ab8328f3453bb20e3f9e8b8cbbfd to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e473ab8328f3453bb20e3f9e8b8cbbfd 2024-12-04T15:23:26,667 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/93b89fc6c1144e198b03aed9d324d5ce to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/93b89fc6c1144e198b03aed9d324d5ce 2024-12-04T15:23:26,669 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/22bf0ee74d704d3fbace4d3bfed49234 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/22bf0ee74d704d3fbace4d3bfed49234 2024-12-04T15:23:26,670 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/1f9edfad4e314323b3e799f77b6b9281 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/1f9edfad4e314323b3e799f77b6b9281 2024-12-04T15:23:26,671 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e148155a99a245a8813c5441e578d245 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/e148155a99a245a8813c5441e578d245 2024-12-04T15:23:26,672 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/f7d7712ab2564dc0a56feb6fc369e534 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/f7d7712ab2564dc0a56feb6fc369e534 2024-12-04T15:23:26,673 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/3976964b82554bb68185a54432adbf00 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/3976964b82554bb68185a54432adbf00 2024-12-04T15:23:26,678 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/ca1f4b69bc364adfbec6a86084ea2fba to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/ca1f4b69bc364adfbec6a86084ea2fba 2024-12-04T15:23:26,679 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/03f8ce30b0204e67ad3142de6bf3ba8d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/03f8ce30b0204e67ad3142de6bf3ba8d 2024-12-04T15:23:26,681 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/dcc78adfa2d34f35af6fd7e43560316c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/dcc78adfa2d34f35af6fd7e43560316c 2024-12-04T15:23:26,686 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/d7eeab28245041998d07f546bfe86365, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/79ed233685b14c5091c96ca25d3978c0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/81388d8563f346f7b57e7172387e901f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1024cc3ff2f34bc3b8dace57707ac27b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1e5fdb41dd2f46c294fec8393610276e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/36cb77704df048a18ba4d2c8a46b12f9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/8c76d1f8ef654b0b98f1f43195ed9ea3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7e91d9f45d664b488715528c78b424ee, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/3f0e7d8113424af7beaa4e2dc209e078, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/330396111f574a768e4e28a1280f1e0c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/d9f5744d7bce4288a12585c3e3e8cbd1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/86ec2511cf5741808aa02ec263fce3e3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/c7fd48e0cea6448a92c3b41f3959147b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/82e815d00ce444c1b58ac91cdf32e244, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/16b23c115c534beb90d25209784fd388, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/e9b90727d1da497eb579ac6e7877e5f0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/b6925f80ee1c4ec99cb1368d1a555152, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7c21c86a2fe44474b3b6562742120b2c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/f3853f57af5440f8b41888edbcf44766, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1fc7133e5bd745ab9316927e09117c9d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/e8640d529078495fa491851e8b631a6c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/6f91e5dfc9a84854941787a1cdc871a7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/c3050df0f471407ab34b6b39994c72b7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1f1c140383fb43e0b60d6a40115f9bb7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7ee5a574074442d3a5bc27ec9c7fd69b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/de866ae3808e474a811ad53274e27401, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/549db563a45645d6a75ae2ae5e9c11d3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/8b4abb7e85d449d7a3bdc71afd59cec1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/824c4a30ba3b4198b1493f3c55c4e7f3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/38e8ca939979462c9c5934511c03390c] to archive 2024-12-04T15:23:26,689 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-04T15:23:26,692 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/d7eeab28245041998d07f546bfe86365 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/d7eeab28245041998d07f546bfe86365 2024-12-04T15:23:26,693 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/79ed233685b14c5091c96ca25d3978c0 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/79ed233685b14c5091c96ca25d3978c0 2024-12-04T15:23:26,695 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/81388d8563f346f7b57e7172387e901f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/81388d8563f346f7b57e7172387e901f 2024-12-04T15:23:26,696 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1024cc3ff2f34bc3b8dace57707ac27b to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1024cc3ff2f34bc3b8dace57707ac27b 2024-12-04T15:23:26,704 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1e5fdb41dd2f46c294fec8393610276e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1e5fdb41dd2f46c294fec8393610276e 2024-12-04T15:23:26,706 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/36cb77704df048a18ba4d2c8a46b12f9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/36cb77704df048a18ba4d2c8a46b12f9 2024-12-04T15:23:26,707 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/8c76d1f8ef654b0b98f1f43195ed9ea3 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/8c76d1f8ef654b0b98f1f43195ed9ea3 2024-12-04T15:23:26,712 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7e91d9f45d664b488715528c78b424ee to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7e91d9f45d664b488715528c78b424ee 2024-12-04T15:23:26,716 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/3f0e7d8113424af7beaa4e2dc209e078 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/3f0e7d8113424af7beaa4e2dc209e078 2024-12-04T15:23:26,717 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/330396111f574a768e4e28a1280f1e0c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/330396111f574a768e4e28a1280f1e0c 2024-12-04T15:23:26,719 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/d9f5744d7bce4288a12585c3e3e8cbd1 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/d9f5744d7bce4288a12585c3e3e8cbd1 2024-12-04T15:23:26,721 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/86ec2511cf5741808aa02ec263fce3e3 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/86ec2511cf5741808aa02ec263fce3e3 2024-12-04T15:23:26,722 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/c7fd48e0cea6448a92c3b41f3959147b to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/c7fd48e0cea6448a92c3b41f3959147b 2024-12-04T15:23:26,731 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/82e815d00ce444c1b58ac91cdf32e244 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/82e815d00ce444c1b58ac91cdf32e244 2024-12-04T15:23:26,736 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/16b23c115c534beb90d25209784fd388 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/16b23c115c534beb90d25209784fd388 2024-12-04T15:23:26,737 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/e9b90727d1da497eb579ac6e7877e5f0 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/e9b90727d1da497eb579ac6e7877e5f0 2024-12-04T15:23:26,739 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/b6925f80ee1c4ec99cb1368d1a555152 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/b6925f80ee1c4ec99cb1368d1a555152 2024-12-04T15:23:26,740 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7c21c86a2fe44474b3b6562742120b2c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7c21c86a2fe44474b3b6562742120b2c 2024-12-04T15:23:26,744 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/f3853f57af5440f8b41888edbcf44766 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/f3853f57af5440f8b41888edbcf44766 2024-12-04T15:23:26,745 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1fc7133e5bd745ab9316927e09117c9d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1fc7133e5bd745ab9316927e09117c9d 2024-12-04T15:23:26,746 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/e8640d529078495fa491851e8b631a6c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/e8640d529078495fa491851e8b631a6c 2024-12-04T15:23:26,747 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/6f91e5dfc9a84854941787a1cdc871a7 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/6f91e5dfc9a84854941787a1cdc871a7 2024-12-04T15:23:26,749 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/c3050df0f471407ab34b6b39994c72b7 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/c3050df0f471407ab34b6b39994c72b7 2024-12-04T15:23:26,750 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1f1c140383fb43e0b60d6a40115f9bb7 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/1f1c140383fb43e0b60d6a40115f9bb7 2024-12-04T15:23:26,751 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7ee5a574074442d3a5bc27ec9c7fd69b to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/7ee5a574074442d3a5bc27ec9c7fd69b 2024-12-04T15:23:26,752 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/de866ae3808e474a811ad53274e27401 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/de866ae3808e474a811ad53274e27401 2024-12-04T15:23:26,753 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/549db563a45645d6a75ae2ae5e9c11d3 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/549db563a45645d6a75ae2ae5e9c11d3 2024-12-04T15:23:26,754 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/8b4abb7e85d449d7a3bdc71afd59cec1 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/8b4abb7e85d449d7a3bdc71afd59cec1 2024-12-04T15:23:26,755 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/824c4a30ba3b4198b1493f3c55c4e7f3 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/824c4a30ba3b4198b1493f3c55c4e7f3 2024-12-04T15:23:26,756 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/38e8ca939979462c9c5934511c03390c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/38e8ca939979462c9c5934511c03390c 2024-12-04T15:23:26,764 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/52358fa9c3244cce987ad0040dc9d26b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/a5baf56476f048158fa26575623df910, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/c904ca0f481f474b87642395414201fb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/4d7245d556264f88bbd81491ec11c59b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/ac814908fbfe439a8b764455eb76cf9c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/684a7becfc6f4c7cb17a71a74d5793af, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/949abcf55d0544b0b6aadbe3244738c0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/7a291a18a4cb4f308670a09a159e2a82, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/8a62868eaf7b4585b69c4917215dfb67, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3350d1cc23024b38aa475de4c79ce3f9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/64291b3037254b2daa5366a59828e9e7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/28fb4b67426b4989b70f7f267d71d536, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/6c0eb835fbba48c6a13ddce0ea526b0c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/bc8362d916fa4cda8dad8c9af022faf7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/fe26157428fc403fbfa825e23f1fce32, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3c98eb39e43e40cabbe8e7b4064c4fad, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/4d6f8ec5814542589506e26b26a2ac3a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/07f53bf378074ad9be9c12a14d321261, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3ffe7dc91db045d4a2eead8f649486e6, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/577748ea24f04ab3a06fddb2166ba257, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/ab45264411b44a4ab77968135b348e55, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/985ebaa99f8b4f64bb7f46c8114e1e36, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/024351512e8e4c6b901adc1cfb61d4b2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/e28886c7ae04451e80400a8c4c719f5a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/227dd4298e024f6297f893d815895da8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/714a9eccf26f407899f33fd3ea9c11e5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/6a57eda5be4f432b9a9653a7453dcfd1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/24777dc42bf147a2a817e22220285afb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/a0b4be1a6d4e4cafaa2fd44fa5bbe103, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3d754c4c6b7c4114a2990c04b12b3bdf] to archive 2024-12-04T15:23:26,765 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-04T15:23:26,769 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/52358fa9c3244cce987ad0040dc9d26b to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/52358fa9c3244cce987ad0040dc9d26b 2024-12-04T15:23:26,773 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/a5baf56476f048158fa26575623df910 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/a5baf56476f048158fa26575623df910 2024-12-04T15:23:26,776 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/c904ca0f481f474b87642395414201fb to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/c904ca0f481f474b87642395414201fb 2024-12-04T15:23:26,777 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/4d7245d556264f88bbd81491ec11c59b to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/4d7245d556264f88bbd81491ec11c59b 2024-12-04T15:23:26,778 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/ac814908fbfe439a8b764455eb76cf9c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/ac814908fbfe439a8b764455eb76cf9c 2024-12-04T15:23:26,779 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/684a7becfc6f4c7cb17a71a74d5793af to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/684a7becfc6f4c7cb17a71a74d5793af 2024-12-04T15:23:26,781 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/949abcf55d0544b0b6aadbe3244738c0 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/949abcf55d0544b0b6aadbe3244738c0 2024-12-04T15:23:26,782 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/7a291a18a4cb4f308670a09a159e2a82 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/7a291a18a4cb4f308670a09a159e2a82 2024-12-04T15:23:26,784 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/8a62868eaf7b4585b69c4917215dfb67 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/8a62868eaf7b4585b69c4917215dfb67 2024-12-04T15:23:26,785 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3350d1cc23024b38aa475de4c79ce3f9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3350d1cc23024b38aa475de4c79ce3f9 2024-12-04T15:23:26,787 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/64291b3037254b2daa5366a59828e9e7 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/64291b3037254b2daa5366a59828e9e7 2024-12-04T15:23:26,789 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/28fb4b67426b4989b70f7f267d71d536 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/28fb4b67426b4989b70f7f267d71d536 2024-12-04T15:23:26,790 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/6c0eb835fbba48c6a13ddce0ea526b0c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/6c0eb835fbba48c6a13ddce0ea526b0c 2024-12-04T15:23:26,791 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/bc8362d916fa4cda8dad8c9af022faf7 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/bc8362d916fa4cda8dad8c9af022faf7 2024-12-04T15:23:26,792 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/fe26157428fc403fbfa825e23f1fce32 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/fe26157428fc403fbfa825e23f1fce32 2024-12-04T15:23:26,793 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3c98eb39e43e40cabbe8e7b4064c4fad to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3c98eb39e43e40cabbe8e7b4064c4fad 2024-12-04T15:23:26,794 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/4d6f8ec5814542589506e26b26a2ac3a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/4d6f8ec5814542589506e26b26a2ac3a 2024-12-04T15:23:26,796 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/07f53bf378074ad9be9c12a14d321261 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/07f53bf378074ad9be9c12a14d321261 2024-12-04T15:23:26,797 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3ffe7dc91db045d4a2eead8f649486e6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3ffe7dc91db045d4a2eead8f649486e6 2024-12-04T15:23:26,798 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/577748ea24f04ab3a06fddb2166ba257 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/577748ea24f04ab3a06fddb2166ba257 2024-12-04T15:23:26,799 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/ab45264411b44a4ab77968135b348e55 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/ab45264411b44a4ab77968135b348e55 2024-12-04T15:23:26,800 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/985ebaa99f8b4f64bb7f46c8114e1e36 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/985ebaa99f8b4f64bb7f46c8114e1e36 2024-12-04T15:23:26,801 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/024351512e8e4c6b901adc1cfb61d4b2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/024351512e8e4c6b901adc1cfb61d4b2 2024-12-04T15:23:26,806 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/e28886c7ae04451e80400a8c4c719f5a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/e28886c7ae04451e80400a8c4c719f5a 2024-12-04T15:23:26,807 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/227dd4298e024f6297f893d815895da8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/227dd4298e024f6297f893d815895da8 2024-12-04T15:23:26,808 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/714a9eccf26f407899f33fd3ea9c11e5 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/714a9eccf26f407899f33fd3ea9c11e5 2024-12-04T15:23:26,811 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/6a57eda5be4f432b9a9653a7453dcfd1 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/6a57eda5be4f432b9a9653a7453dcfd1 2024-12-04T15:23:26,813 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/24777dc42bf147a2a817e22220285afb to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/24777dc42bf147a2a817e22220285afb 2024-12-04T15:23:26,816 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/a0b4be1a6d4e4cafaa2fd44fa5bbe103 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/a0b4be1a6d4e4cafaa2fd44fa5bbe103 2024-12-04T15:23:26,817 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/645c2dbfef2e:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3d754c4c6b7c4114a2990c04b12b3bdf to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/3d754c4c6b7c4114a2990c04b12b3bdf 2024-12-04T15:23:27,397 DEBUG [Thread-1801 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x051f8b5d to 127.0.0.1:55739 2024-12-04T15:23:27,397 DEBUG [Thread-1801 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:27,397 INFO [Time-limited test {}] 
hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-04T15:23:27,397 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 85 2024-12-04T15:23:27,397 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 21 2024-12-04T15:23:27,397 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 106 2024-12-04T15:23:27,397 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 74 2024-12-04T15:23:27,397 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 66 2024-12-04T15:23:27,397 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-04T15:23:27,397 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-04T15:23:27,398 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1531 2024-12-04T15:23:27,398 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4593 rows 2024-12-04T15:23:27,398 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1508 2024-12-04T15:23:27,398 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4524 rows 2024-12-04T15:23:27,398 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1516 2024-12-04T15:23:27,398 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4547 rows 2024-12-04T15:23:27,398 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1529 2024-12-04T15:23:27,398 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4586 rows 2024-12-04T15:23:27,398 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1521 2024-12-04T15:23:27,398 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4563 rows 2024-12-04T15:23:27,398 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-04T15:23:27,398 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2914d173 to 127.0.0.1:55739 2024-12-04T15:23:27,398 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:27,403 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-04T15:23:27,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-04T15:23:27,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-04T15:23:27,413 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325807413"}]},"ts":"1733325807413"} 2024-12-04T15:23:27,414 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-04T15:23:27,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-04T15:23:27,417 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-04T15:23:27,418 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure 
table=TestAcidGuarantees}] 2024-12-04T15:23:27,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=96aa8a9c538d7176a93d416eb9d9bfac, UNASSIGN}] 2024-12-04T15:23:27,420 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=118, ppid=117, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=96aa8a9c538d7176a93d416eb9d9bfac, UNASSIGN 2024-12-04T15:23:27,421 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=118 updating hbase:meta row=96aa8a9c538d7176a93d416eb9d9bfac, regionState=CLOSING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:27,422 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-04T15:23:27,422 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; CloseRegionProcedure 96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:23:27,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-04T15:23:27,574 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:27,574 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(124): Close 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:27,574 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-04T15:23:27,575 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1681): Closing 96aa8a9c538d7176a93d416eb9d9bfac, disabling compactions & flushes 2024-12-04T15:23:27,575 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:27,575 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:27,575 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. after waiting 0 ms 2024-12-04T15:23:27,575 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 
2024-12-04T15:23:27,575 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(2837): Flushing 96aa8a9c538d7176a93d416eb9d9bfac 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-04T15:23:27,575 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=A 2024-12-04T15:23:27,575 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:27,575 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=B 2024-12-04T15:23:27,575 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:27,575 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96aa8a9c538d7176a93d416eb9d9bfac, store=C 2024-12-04T15:23:27,575 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:27,584 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412040f93a25a0539465c874af6b1662e5380_96aa8a9c538d7176a93d416eb9d9bfac is 50, key is test_row_1/A:col10/1733325807395/Put/seqid=0 2024-12-04T15:23:27,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742358_1534 (size=9914) 2024-12-04T15:23:27,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-04T15:23:27,995 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:27,999 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412040f93a25a0539465c874af6b1662e5380_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412040f93a25a0539465c874af6b1662e5380_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,000 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/4fcfdc8a1bb54a66a8fc1cac933a5060, store: [table=TestAcidGuarantees family=A region=96aa8a9c538d7176a93d416eb9d9bfac] 2024-12-04T15:23:28,001 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/4fcfdc8a1bb54a66a8fc1cac933a5060 is 175, key is test_row_1/A:col10/1733325807395/Put/seqid=0 2024-12-04T15:23:28,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742359_1535 (size=22561) 2024-12-04T15:23:28,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-04T15:23:28,406 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=455, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/4fcfdc8a1bb54a66a8fc1cac933a5060 2024-12-04T15:23:28,414 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/0921dee130e54e028b7ac081eba9d86f is 50, key is test_row_1/B:col10/1733325807395/Put/seqid=0 2024-12-04T15:23:28,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742360_1536 (size=9857) 2024-12-04T15:23:28,430 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/0921dee130e54e028b7ac081eba9d86f 2024-12-04T15:23:28,436 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/d3ebddae48ce439491b7f8bb681cf28d is 50, key is test_row_1/C:col10/1733325807395/Put/seqid=0 2024-12-04T15:23:28,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742361_1537 (size=9857) 2024-12-04T15:23:28,443 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/d3ebddae48ce439491b7f8bb681cf28d 2024-12-04T15:23:28,447 DEBUG 
[RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/A/4fcfdc8a1bb54a66a8fc1cac933a5060 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/4fcfdc8a1bb54a66a8fc1cac933a5060 2024-12-04T15:23:28,451 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/4fcfdc8a1bb54a66a8fc1cac933a5060, entries=100, sequenceid=455, filesize=22.0 K 2024-12-04T15:23:28,452 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/B/0921dee130e54e028b7ac081eba9d86f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/0921dee130e54e028b7ac081eba9d86f 2024-12-04T15:23:28,456 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/0921dee130e54e028b7ac081eba9d86f, entries=100, sequenceid=455, filesize=9.6 K 2024-12-04T15:23:28,460 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/.tmp/C/d3ebddae48ce439491b7f8bb681cf28d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/d3ebddae48ce439491b7f8bb681cf28d 2024-12-04T15:23:28,464 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/d3ebddae48ce439491b7f8bb681cf28d, entries=100, sequenceid=455, filesize=9.6 K 2024-12-04T15:23:28,465 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 96aa8a9c538d7176a93d416eb9d9bfac in 890ms, sequenceid=455, compaction requested=true 2024-12-04T15:23:28,472 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/recovered.edits/458.seqid, newMaxSeqId=458, maxSeqId=4 2024-12-04T15:23:28,473 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 
{event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac. 2024-12-04T15:23:28,473 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1635): Region close journal for 96aa8a9c538d7176a93d416eb9d9bfac: 2024-12-04T15:23:28,475 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(170): Closed 96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,475 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=118 updating hbase:meta row=96aa8a9c538d7176a93d416eb9d9bfac, regionState=CLOSED 2024-12-04T15:23:28,479 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-04T15:23:28,479 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; CloseRegionProcedure 96aa8a9c538d7176a93d416eb9d9bfac, server=645c2dbfef2e,42169,1733325683856 in 1.0540 sec 2024-12-04T15:23:28,480 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=118, resume processing ppid=117 2024-12-04T15:23:28,480 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, ppid=117, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=96aa8a9c538d7176a93d416eb9d9bfac, UNASSIGN in 1.0600 sec 2024-12-04T15:23:28,482 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-04T15:23:28,482 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.0640 sec 2024-12-04T15:23:28,484 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325808484"}]},"ts":"1733325808484"} 2024-12-04T15:23:28,485 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-04T15:23:28,487 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-04T15:23:28,489 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.0840 sec 2024-12-04T15:23:28,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-04T15:23:28,519 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-12-04T15:23:28,520 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-04T15:23:28,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:23:28,522 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=120, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:23:28,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): 
Checking to see if procedure is done pid=120 2024-12-04T15:23:28,523 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=120, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:23:28,529 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,543 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/recovered.edits] 2024-12-04T15:23:28,556 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/4fcfdc8a1bb54a66a8fc1cac933a5060 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/4fcfdc8a1bb54a66a8fc1cac933a5060 2024-12-04T15:23:28,562 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/ed66728c52ee4e71a76171484845c5f8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/ed66728c52ee4e71a76171484845c5f8 2024-12-04T15:23:28,563 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/f2b31b9239aa4581ba33f96f341617ef to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/A/f2b31b9239aa4581ba33f96f341617ef 2024-12-04T15:23:28,566 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/0921dee130e54e028b7ac081eba9d86f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/0921dee130e54e028b7ac081eba9d86f 2024-12-04T15:23:28,567 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/124db02c9ba94c238bdf23f0129c1d4e to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/124db02c9ba94c238bdf23f0129c1d4e 2024-12-04T15:23:28,568 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/b36268f78f434de4802004aea0f2fd05 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/B/b36268f78f434de4802004aea0f2fd05 2024-12-04T15:23:28,571 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/751a45a895474049aa45c2ed5d8e7167 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/751a45a895474049aa45c2ed5d8e7167 2024-12-04T15:23:28,573 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/7dd02cb488ed4fb49992ab33657466b6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/7dd02cb488ed4fb49992ab33657466b6 2024-12-04T15:23:28,574 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/d3ebddae48ce439491b7f8bb681cf28d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/C/d3ebddae48ce439491b7f8bb681cf28d 2024-12-04T15:23:28,577 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/recovered.edits/458.seqid to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac/recovered.edits/458.seqid 2024-12-04T15:23:28,577 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,578 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-04T15:23:28,578 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-04T15:23:28,579 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-04T15:23:28,598 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204004742bfe52c4f609694f5e54e994c61_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204004742bfe52c4f609694f5e54e994c61_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,599 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120401394fc385be4670b57000dab1005237_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120401394fc385be4670b57000dab1005237_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,600 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412040f93a25a0539465c874af6b1662e5380_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412040f93a25a0539465c874af6b1662e5380_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,601 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120421cc1df9d0a947c0a4b4ae2300543f63_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120421cc1df9d0a947c0a4b4ae2300543f63_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,602 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412042ac62c28f4f145b4bdfa1391bdb2ab1d_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412042ac62c28f4f145b4bdfa1391bdb2ab1d_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,603 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204394d6fe5a1f7479c88aac4055d1df28b_96aa8a9c538d7176a93d416eb9d9bfac to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204394d6fe5a1f7479c88aac4055d1df28b_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,604 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412043c11f130e31a4c459c6d41301b494bb2_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412043c11f130e31a4c459c6d41301b494bb2_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,605 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204434276281e8b42319c47c6986466327d_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204434276281e8b42319c47c6986466327d_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,606 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412044ad9b14b8e214badb49f105122849b24_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412044ad9b14b8e214badb49f105122849b24_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,607 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412046400492c880b49018966167ddfddc996_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412046400492c880b49018966167ddfddc996_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,608 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120471470eebbcac45b6b429a57d4c645dea_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120471470eebbcac45b6b429a57d4c645dea_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,610 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412047a87662cda5d4403a470a47a82787a24_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412047a87662cda5d4403a470a47a82787a24_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,612 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204802717efd0184cbe9f89404241d5e8e4_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204802717efd0184cbe9f89404241d5e8e4_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,613 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204918d855970a14a60ad48615a37776966_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204918d855970a14a60ad48615a37776966_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,614 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120492636139d2194506bd5a192be53b5ab0_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120492636139d2194506bd5a192be53b5ab0_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,615 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204a7598e859d52400ab3cca7dfde47c8c7_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204a7598e859d52400ab3cca7dfde47c8c7_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,616 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204aa54acca9fc545019f0413e64578a4a8_96aa8a9c538d7176a93d416eb9d9bfac to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204aa54acca9fc545019f0413e64578a4a8_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,620 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204b8706a6d514e4268b7d94a2423d7cf81_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204b8706a6d514e4268b7d94a2423d7cf81_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,621 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204c01262badb4d475aa08a8e206dc022ad_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204c01262badb4d475aa08a8e206dc022ad_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,623 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204c41cc49f6f674d4cb8fa070dd4621ec5_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204c41cc49f6f674d4cb8fa070dd4621ec5_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-04T15:23:28,624 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204c87148fef8114a848634fe064891ac85_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204c87148fef8114a848634fe064891ac85_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,626 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204cb13ef43713d48f586b8d93f0599a0cf_96aa8a9c538d7176a93d416eb9d9bfac to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204cb13ef43713d48f586b8d93f0599a0cf_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,627 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204d2996ab1c61345f5bb9b740f11d18b7b_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204d2996ab1c61345f5bb9b740f11d18b7b_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,629 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204e52e6255edc94c32865a1b880503621d_96aa8a9c538d7176a93d416eb9d9bfac to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204e52e6255edc94c32865a1b880503621d_96aa8a9c538d7176a93d416eb9d9bfac 2024-12-04T15:23:28,629 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-04T15:23:28,631 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=120, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:23:28,637 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-04T15:23:28,641 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-04T15:23:28,642 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=120, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:23:28,642 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-04T15:23:28,642 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733325808642"}]},"ts":"9223372036854775807"} 2024-12-04T15:23:28,645 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-04T15:23:28,645 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 96aa8a9c538d7176a93d416eb9d9bfac, NAME => 'TestAcidGuarantees,,1733325774641.96aa8a9c538d7176a93d416eb9d9bfac.', STARTKEY => '', ENDKEY => ''}] 2024-12-04T15:23:28,645 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-12-04T15:23:28,645 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733325808645"}]},"ts":"9223372036854775807"} 2024-12-04T15:23:28,647 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-04T15:23:28,649 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=120, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:23:28,649 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 129 msec 2024-12-04T15:23:28,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-04T15:23:28,825 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-04T15:23:28,837 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=237 (was 237), OpenFileDescriptor=453 (was 449) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=783 (was 776) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3794 (was 4158) 2024-12-04T15:23:28,849 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=237, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=783, ProcessCount=11, AvailableMemoryMB=3793 2024-12-04T15:23:28,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-04T15:23:28,851 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T15:23:28,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=121, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-04T15:23:28,855 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T15:23:28,855 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:28,855 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 121 2024-12-04T15:23:28,856 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T15:23:28,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-04T15:23:28,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742362_1538 (size=963) 2024-12-04T15:23:28,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-04T15:23:29,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-04T15:23:29,264 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c 2024-12-04T15:23:29,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742363_1539 (size=53) 2024-12-04T15:23:29,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-04T15:23:29,671 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:23:29,671 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 1c46d02b12b7c26e3e20a64bcdd3bec5, disabling compactions & flushes 2024-12-04T15:23:29,671 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:29,671 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:29,671 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. after waiting 0 ms 2024-12-04T15:23:29,671 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:29,671 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:29,671 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:29,672 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T15:23:29,673 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733325809672"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733325809672"}]},"ts":"1733325809672"} 2024-12-04T15:23:29,674 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-04T15:23:29,674 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T15:23:29,675 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325809674"}]},"ts":"1733325809674"} 2024-12-04T15:23:29,675 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-04T15:23:29,680 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1c46d02b12b7c26e3e20a64bcdd3bec5, ASSIGN}] 2024-12-04T15:23:29,681 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1c46d02b12b7c26e3e20a64bcdd3bec5, ASSIGN 2024-12-04T15:23:29,682 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=1c46d02b12b7c26e3e20a64bcdd3bec5, ASSIGN; state=OFFLINE, location=645c2dbfef2e,42169,1733325683856; forceNewPlan=false, retain=false 2024-12-04T15:23:29,832 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=1c46d02b12b7c26e3e20a64bcdd3bec5, regionState=OPENING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:29,834 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; OpenRegionProcedure 1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:23:29,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-04T15:23:29,985 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:29,988 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:29,989 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7285): Opening region: {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:23:29,989 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:29,989 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:23:29,989 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7327): checking encryption for 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:29,989 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7330): checking classloading for 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:29,991 INFO [StoreOpener-1c46d02b12b7c26e3e20a64bcdd3bec5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:29,992 INFO [StoreOpener-1c46d02b12b7c26e3e20a64bcdd3bec5-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:23:29,993 INFO [StoreOpener-1c46d02b12b7c26e3e20a64bcdd3bec5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1c46d02b12b7c26e3e20a64bcdd3bec5 columnFamilyName A 2024-12-04T15:23:29,993 DEBUG [StoreOpener-1c46d02b12b7c26e3e20a64bcdd3bec5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:29,993 INFO [StoreOpener-1c46d02b12b7c26e3e20a64bcdd3bec5-1 {}] regionserver.HStore(327): Store=1c46d02b12b7c26e3e20a64bcdd3bec5/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:23:29,993 INFO [StoreOpener-1c46d02b12b7c26e3e20a64bcdd3bec5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:29,995 INFO [StoreOpener-1c46d02b12b7c26e3e20a64bcdd3bec5-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:23:29,995 INFO [StoreOpener-1c46d02b12b7c26e3e20a64bcdd3bec5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1c46d02b12b7c26e3e20a64bcdd3bec5 columnFamilyName B 2024-12-04T15:23:29,995 DEBUG [StoreOpener-1c46d02b12b7c26e3e20a64bcdd3bec5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:29,996 INFO [StoreOpener-1c46d02b12b7c26e3e20a64bcdd3bec5-1 {}] regionserver.HStore(327): Store=1c46d02b12b7c26e3e20a64bcdd3bec5/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:23:29,996 INFO [StoreOpener-1c46d02b12b7c26e3e20a64bcdd3bec5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:30,000 INFO [StoreOpener-1c46d02b12b7c26e3e20a64bcdd3bec5-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:23:30,000 INFO [StoreOpener-1c46d02b12b7c26e3e20a64bcdd3bec5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1c46d02b12b7c26e3e20a64bcdd3bec5 columnFamilyName C 2024-12-04T15:23:30,000 DEBUG [StoreOpener-1c46d02b12b7c26e3e20a64bcdd3bec5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:23:30,000 INFO [StoreOpener-1c46d02b12b7c26e3e20a64bcdd3bec5-1 {}] regionserver.HStore(327): Store=1c46d02b12b7c26e3e20a64bcdd3bec5/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:23:30,000 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:30,001 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:30,001 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:30,002 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T15:23:30,004 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1085): writing seq id for 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:30,005 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T15:23:30,006 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1102): Opened 1c46d02b12b7c26e3e20a64bcdd3bec5; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66726418, jitterRate=-0.005698889493942261}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T15:23:30,007 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1001): Region open journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:30,007 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., pid=123, masterSystemTime=1733325809985 2024-12-04T15:23:30,009 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:30,009 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:30,009 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=1c46d02b12b7c26e3e20a64bcdd3bec5, regionState=OPEN, openSeqNum=2, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:30,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-04T15:23:30,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; OpenRegionProcedure 1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 in 177 msec 2024-12-04T15:23:30,013 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=122, resume processing ppid=121 2024-12-04T15:23:30,013 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, ppid=121, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1c46d02b12b7c26e3e20a64bcdd3bec5, ASSIGN in 333 msec 2024-12-04T15:23:30,014 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T15:23:30,014 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325810014"}]},"ts":"1733325810014"} 2024-12-04T15:23:30,015 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-04T15:23:30,018 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T15:23:30,019 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1670 sec 2024-12-04T15:23:30,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-04T15:23:30,961 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 121 completed 2024-12-04T15:23:30,963 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7815a655 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1ee3f96a 2024-12-04T15:23:31,016 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14ce2d5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:23:31,017 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:23:31,018 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57412, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:23:31,019 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T15:23:31,020 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46744, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T15:23:31,022 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22788836 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@23d11dba 2024-12-04T15:23:31,077 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46907051, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:23:31,078 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x029b4c46 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@290cbd62 2024-12-04T15:23:31,123 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c5b42ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:23:31,124 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72f6c410 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3edd8081 2024-12-04T15:23:31,185 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8a7c9a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:23:31,186 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5cd1f95e to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e5aa3cd 2024-12-04T15:23:31,246 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3806917d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:23:31,247 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x684e78a0 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@33d1eaaf 2024-12-04T15:23:31,308 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fd018d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:23:31,309 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x101a467e to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2f33d23c 2024-12-04T15:23:31,401 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@da40a5b, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:23:31,402 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x717d73ef to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@40eefdce 2024-12-04T15:23:31,473 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6db5c15a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:23:31,474 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x738d3eb1 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@443abd83 2024-12-04T15:23:31,547 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7460b4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:23:31,549 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a4579dd to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4b8211b3 2024-12-04T15:23:31,589 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ca967ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:23:31,591 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70776486 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2ba3fcb2 2024-12-04T15:23:31,610 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c82f0a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:23:31,616 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:23:31,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-12-04T15:23:31,618 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:23:31,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-04T15:23:31,619 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=124, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:23:31,619 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:23:31,620 DEBUG [hconnection-0x17d47516-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:23:31,620 DEBUG [hconnection-0x6b7c580a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:23:31,621 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57428, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:23:31,625 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57426, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:23:31,631 DEBUG [hconnection-0x7d2d3b96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:23:31,634 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57450, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:23:31,637 DEBUG [hconnection-0x37dacfe4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:23:31,638 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57452, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:23:31,640 DEBUG [hconnection-0x47b10fb6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:23:31,644 DEBUG [hconnection-0xc65765f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:23:31,645 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57466, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:23:31,647 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57476, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:23:31,661 DEBUG [hconnection-0x23671880-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:23:31,663 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57490, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:23:31,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:31,664 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-04T15:23:31,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 
2024-12-04T15:23:31,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:31,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:31,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:31,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:31,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:31,668 DEBUG [hconnection-0x44eb9049-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:23:31,672 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57502, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:23:31,676 DEBUG [hconnection-0xe7819d2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:23:31,678 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57504, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:23:31,680 DEBUG [hconnection-0x4daf417f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:23:31,682 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57508, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:23:31,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:31,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325871699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:31,709 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:31,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:31,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325871704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:31,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325871704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:31,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:31,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325871709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:31,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:31,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325871710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:31,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-04T15:23:31,764 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/23a1b0fc8a8a4e13a2cd063f6eca3c34 is 50, key is test_row_0/A:col10/1733325811654/Put/seqid=0 2024-12-04T15:23:31,772 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:31,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-04T15:23:31,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:31,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:31,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:31,776 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:31,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:31,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:31,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742364_1540 (size=12001) 2024-12-04T15:23:31,818 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/23a1b0fc8a8a4e13a2cd063f6eca3c34 2024-12-04T15:23:31,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:31,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325871817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:31,833 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:31,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325871818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:31,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:31,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325871828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:31,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:31,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325871831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:31,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:31,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325871832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:31,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-04T15:23:31,940 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:31,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-04T15:23:31,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:31,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:31,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:31,948 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:31,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:31,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:31,958 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/424720a6f2bf429793ab330c9d5b5026 is 50, key is test_row_0/B:col10/1733325811654/Put/seqid=0 2024-12-04T15:23:32,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742365_1541 (size=12001) 2024-12-04T15:23:32,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:32,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325872041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:32,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325872048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:32,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325872045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:32,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325872058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:32,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325872058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,116 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,120 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-04T15:23:32,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:32,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:32,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:32,121 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:32,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:32,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:32,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-04T15:23:32,284 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-04T15:23:32,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:32,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:32,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:32,285 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:32,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:32,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:32,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:32,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325872344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:32,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325872364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:32,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:32,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325872372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325872374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:32,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325872376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/424720a6f2bf429793ab330c9d5b5026 2024-12-04T15:23:32,444 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-04T15:23:32,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:32,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:32,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:32,448 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:32,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:32,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:32,493 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/7808a8783ba5432ca9889120dfc084ed is 50, key is test_row_0/C:col10/1733325811654/Put/seqid=0 2024-12-04T15:23:32,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742366_1542 (size=12001) 2024-12-04T15:23:32,540 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/7808a8783ba5432ca9889120dfc084ed 2024-12-04T15:23:32,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/23a1b0fc8a8a4e13a2cd063f6eca3c34 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/23a1b0fc8a8a4e13a2cd063f6eca3c34 2024-12-04T15:23:32,589 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/23a1b0fc8a8a4e13a2cd063f6eca3c34, entries=150, sequenceid=13, filesize=11.7 K 2024-12-04T15:23:32,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/424720a6f2bf429793ab330c9d5b5026 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/424720a6f2bf429793ab330c9d5b5026 2024-12-04T15:23:32,608 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,609 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-04T15:23:32,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:32,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:32,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:32,612 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:32,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:32,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:32,618 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/424720a6f2bf429793ab330c9d5b5026, entries=150, sequenceid=13, filesize=11.7 K 2024-12-04T15:23:32,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/7808a8783ba5432ca9889120dfc084ed as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/7808a8783ba5432ca9889120dfc084ed 2024-12-04T15:23:32,634 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/7808a8783ba5432ca9889120dfc084ed, entries=150, sequenceid=13, filesize=11.7 K 2024-12-04T15:23:32,636 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 972ms, sequenceid=13, compaction requested=false 2024-12-04T15:23:32,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:32,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-04T15:23:32,766 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,768 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-04T15:23:32,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:32,770 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-04T15:23:32,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:32,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:32,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:32,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:32,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:32,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:32,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/5895d78eaa9848d699f941818a23dab9 is 50, key is test_row_0/A:col10/1733325811703/Put/seqid=0 2024-12-04T15:23:32,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742367_1543 (size=12001) 2024-12-04T15:23:32,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:32,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:32,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:32,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325872909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:32,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325872914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:32,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325872914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:32,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325872932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:32,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:32,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325872933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:33,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:33,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325873033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:33,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:33,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325873038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:33,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:33,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325873038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:33,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:33,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325873045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:33,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:33,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325873053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:33,234 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/5895d78eaa9848d699f941818a23dab9 2024-12-04T15:23:33,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:33,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325873241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:33,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:33,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325873249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:33,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:33,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325873258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:33,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:33,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325873258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:33,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:33,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325873266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:33,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/30dcfbc84bb6464388e74108c6883249 is 50, key is test_row_0/B:col10/1733325811703/Put/seqid=0 2024-12-04T15:23:33,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742368_1544 (size=12001) 2024-12-04T15:23:33,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:33,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325873549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:33,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:33,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325873557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:33,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:33,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325873568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:33,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:33,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325873573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:33,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:33,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325873580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:33,710 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/30dcfbc84bb6464388e74108c6883249 2024-12-04T15:23:33,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-04T15:23:33,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/57713693d879448a919e60e78ad9dfb2 is 50, key is test_row_0/C:col10/1733325811703/Put/seqid=0 2024-12-04T15:23:33,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742369_1545 (size=12001) 2024-12-04T15:23:34,022 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-04T15:23:34,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:34,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325874056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:34,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:34,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325874084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:34,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:34,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325874085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:34,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:34,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325874088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:34,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:34,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325874100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:34,214 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/57713693d879448a919e60e78ad9dfb2 2024-12-04T15:23:34,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/5895d78eaa9848d699f941818a23dab9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/5895d78eaa9848d699f941818a23dab9 2024-12-04T15:23:34,288 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/5895d78eaa9848d699f941818a23dab9, entries=150, sequenceid=37, filesize=11.7 K 2024-12-04T15:23:34,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/30dcfbc84bb6464388e74108c6883249 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/30dcfbc84bb6464388e74108c6883249 2024-12-04T15:23:34,307 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/30dcfbc84bb6464388e74108c6883249, entries=150, sequenceid=37, filesize=11.7 K 2024-12-04T15:23:34,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/57713693d879448a919e60e78ad9dfb2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/57713693d879448a919e60e78ad9dfb2 2024-12-04T15:23:34,327 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/57713693d879448a919e60e78ad9dfb2, entries=150, sequenceid=37, filesize=11.7 K 2024-12-04T15:23:34,333 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 1564ms, sequenceid=37, compaction requested=false 2024-12-04T15:23:34,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:34,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:34,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-12-04T15:23:34,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-12-04T15:23:34,356 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-04T15:23:34,357 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7360 sec 2024-12-04T15:23:34,358 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 2.7410 sec 2024-12-04T15:23:35,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:35,085 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-04T15:23:35,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:35,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:35,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:35,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:35,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:35,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:35,098 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/2bc5a0d923d44b19a9d6f785c675272e is 50, key is test_row_0/A:col10/1733325815083/Put/seqid=0 2024-12-04T15:23:35,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742370_1546 (size=14341) 2024-12-04T15:23:35,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325875149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325875149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325875150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325875155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325875156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325875262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325875265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325875265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325875267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325875267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325875468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325875469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325875470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325875473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325875473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,540 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/2bc5a0d923d44b19a9d6f785c675272e 2024-12-04T15:23:35,575 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/229fc3f3fd0c4392b09ce68261f48237 is 50, key is test_row_0/B:col10/1733325815083/Put/seqid=0 2024-12-04T15:23:35,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742371_1547 (size=12001) 2024-12-04T15:23:35,632 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/229fc3f3fd0c4392b09ce68261f48237 2024-12-04T15:23:35,666 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/c53b83434e8444bdb87c94f0e1022034 is 50, key is test_row_0/C:col10/1733325815083/Put/seqid=0 2024-12-04T15:23:35,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742372_1548 (size=12001) 2024-12-04T15:23:35,741 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/c53b83434e8444bdb87c94f0e1022034 2024-12-04T15:23:35,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-04T15:23:35,747 INFO [Thread-2375 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-04T15:23:35,749 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): 
Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:23:35,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-12-04T15:23:35,750 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:23:35,751 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:23:35,751 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:23:35,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-04T15:23:35,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/2bc5a0d923d44b19a9d6f785c675272e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2bc5a0d923d44b19a9d6f785c675272e 2024-12-04T15:23:35,765 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2bc5a0d923d44b19a9d6f785c675272e, entries=200, sequenceid=51, filesize=14.0 K 2024-12-04T15:23:35,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/229fc3f3fd0c4392b09ce68261f48237 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/229fc3f3fd0c4392b09ce68261f48237 2024-12-04T15:23:35,774 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/229fc3f3fd0c4392b09ce68261f48237, entries=150, sequenceid=51, filesize=11.7 K 2024-12-04T15:23:35,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/c53b83434e8444bdb87c94f0e1022034 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/c53b83434e8444bdb87c94f0e1022034 2024-12-04T15:23:35,780 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/c53b83434e8444bdb87c94f0e1022034, entries=150, sequenceid=51, filesize=11.7 K 2024-12-04T15:23:35,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 696ms, sequenceid=51, compaction requested=true 2024-12-04T15:23:35,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:35,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:23:35,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:35,782 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:35,782 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:35,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:23:35,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:35,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:23:35,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:35,784 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:35,784 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/B is initiating minor compaction (all files) 2024-12-04T15:23:35,784 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/B in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
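
Note: the "flush TestAcidGuarantees" request and the resulting FlushTableProcedure (pid=126) recorded a few entries above correspond to an administrative flush issued from the client, the same operation HBaseAdmin reports as "Operation: FLUSH" when it completes. A minimal sketch of issuing such a flush through the public Admin API follows; the class name and the standalone main wrapper are illustrative assumptions, not taken from the test source.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Issues the same kind of request as the flush calls logged above; on the master this
      // shows up as a FlushTableProcedure with one FlushRegionProcedure subprocedure per region.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
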
2024-12-04T15:23:35,784 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/424720a6f2bf429793ab330c9d5b5026, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/30dcfbc84bb6464388e74108c6883249, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/229fc3f3fd0c4392b09ce68261f48237] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=35.2 K 2024-12-04T15:23:35,785 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:35,785 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/A is initiating minor compaction (all files) 2024-12-04T15:23:35,785 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/A in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:35,785 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/23a1b0fc8a8a4e13a2cd063f6eca3c34, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/5895d78eaa9848d699f941818a23dab9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2bc5a0d923d44b19a9d6f785c675272e] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=37.4 K 2024-12-04T15:23:35,785 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23a1b0fc8a8a4e13a2cd063f6eca3c34, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733325811654 2024-12-04T15:23:35,785 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 424720a6f2bf429793ab330c9d5b5026, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733325811654 2024-12-04T15:23:35,786 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5895d78eaa9848d699f941818a23dab9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733325811691 2024-12-04T15:23:35,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:35,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 
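
Note: the repeated RegionTooBusyException entries in this section all cite the same 512.0 K blocking limit. In HBase that limit is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, so the test is presumably running with a deliberately tiny flush size to force frequent flushes under write pressure. The values in the sketch below are assumptions chosen only to reproduce a 512 KB limit; the test's actual settings are not visible in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyMemstoreLimit {
  // Hypothetical values: picked so that flush.size * block.multiplier matches the
  // 512.0 K blocking limit reported in the log above.
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // flush at 128 KB (assumed)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4 * 128 KB = 512 KB
    return conf;
  }
}
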
2024-12-04T15:23:35,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:35,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:35,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:35,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:35,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:35,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:35,788 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 30dcfbc84bb6464388e74108c6883249, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733325811691 2024-12-04T15:23:35,788 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2bc5a0d923d44b19a9d6f785c675272e, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733325812902 2024-12-04T15:23:35,789 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 229fc3f3fd0c4392b09ce68261f48237, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733325812902 2024-12-04T15:23:35,797 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/38886d8c9a094966b8ed7fc3c6932aa8 is 50, key is test_row_0/A:col10/1733325815154/Put/seqid=0 2024-12-04T15:23:35,800 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#B#compaction#463 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:35,801 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/bccb3dabeee24cc2bbf544439349d65a is 50, key is test_row_0/B:col10/1733325815083/Put/seqid=0 2024-12-04T15:23:35,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325875804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325875805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325875807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325875808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325875814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,839 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#A#compaction#464 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:35,840 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/3dd2c2789bb34074a7a7710c77641552 is 50, key is test_row_0/A:col10/1733325815083/Put/seqid=0 2024-12-04T15:23:35,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742373_1549 (size=12001) 2024-12-04T15:23:35,854 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/38886d8c9a094966b8ed7fc3c6932aa8 2024-12-04T15:23:35,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-04T15:23:35,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742374_1550 (size=12104) 2024-12-04T15:23:35,877 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/ae9d2811bc9c4bcea09db6f77aecd3d4 is 50, key is test_row_0/B:col10/1733325815154/Put/seqid=0 2024-12-04T15:23:35,881 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/bccb3dabeee24cc2bbf544439349d65a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/bccb3dabeee24cc2bbf544439349d65a 2024-12-04T15:23:35,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742375_1551 (size=12104) 2024-12-04T15:23:35,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742376_1552 (size=12001) 2024-12-04T15:23:35,888 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/ae9d2811bc9c4bcea09db6f77aecd3d4 2024-12-04T15:23:35,889 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/3dd2c2789bb34074a7a7710c77641552 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/3dd2c2789bb34074a7a7710c77641552 2024-12-04T15:23:35,894 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 
1c46d02b12b7c26e3e20a64bcdd3bec5/A of 1c46d02b12b7c26e3e20a64bcdd3bec5 into 3dd2c2789bb34074a7a7710c77641552(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:35,894 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:35,894 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/A, priority=13, startTime=1733325815781; duration=0sec 2024-12-04T15:23:35,894 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:35,894 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:A 2024-12-04T15:23:35,894 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:35,895 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:35,895 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/C is initiating minor compaction (all files) 2024-12-04T15:23:35,895 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/C in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:35,895 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/7808a8783ba5432ca9889120dfc084ed, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/57713693d879448a919e60e78ad9dfb2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/c53b83434e8444bdb87c94f0e1022034] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=35.2 K 2024-12-04T15:23:35,896 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7808a8783ba5432ca9889120dfc084ed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733325811654 2024-12-04T15:23:35,897 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57713693d879448a919e60e78ad9dfb2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733325811691 2024-12-04T15:23:35,897 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting c53b83434e8444bdb87c94f0e1022034, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733325812902 2024-12-04T15:23:35,898 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/B of 1c46d02b12b7c26e3e20a64bcdd3bec5 into bccb3dabeee24cc2bbf544439349d65a(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:23:35,898 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:35,898 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/B, priority=13, startTime=1733325815782; duration=0sec 2024-12-04T15:23:35,898 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:35,899 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:B 2024-12-04T15:23:35,902 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/bd917a0195484716a512c575c09f3e6c is 50, key is test_row_0/C:col10/1733325815154/Put/seqid=0 2024-12-04T15:23:35,903 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,904 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-04T15:23:35,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:35,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:35,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:35,904 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:35,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:35,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:35,919 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#C#compaction#467 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:35,919 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/4f858110874a462d9cf1070a90feb174 is 50, key is test_row_0/C:col10/1733325815083/Put/seqid=0 2024-12-04T15:23:35,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325875915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325875917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325875917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325875917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:35,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325875928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:35,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742377_1553 (size=12001) 2024-12-04T15:23:35,936 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/bd917a0195484716a512c575c09f3e6c 2024-12-04T15:23:35,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/38886d8c9a094966b8ed7fc3c6932aa8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/38886d8c9a094966b8ed7fc3c6932aa8 2024-12-04T15:23:35,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742378_1554 (size=12104) 2024-12-04T15:23:35,945 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/38886d8c9a094966b8ed7fc3c6932aa8, entries=150, sequenceid=74, filesize=11.7 K 2024-12-04T15:23:35,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/ae9d2811bc9c4bcea09db6f77aecd3d4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/ae9d2811bc9c4bcea09db6f77aecd3d4 2024-12-04T15:23:35,950 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/4f858110874a462d9cf1070a90feb174 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/4f858110874a462d9cf1070a90feb174 2024-12-04T15:23:35,953 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/ae9d2811bc9c4bcea09db6f77aecd3d4, entries=150, sequenceid=74, filesize=11.7 K 2024-12-04T15:23:35,953 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/bd917a0195484716a512c575c09f3e6c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/bd917a0195484716a512c575c09f3e6c 2024-12-04T15:23:35,955 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/C of 1c46d02b12b7c26e3e20a64bcdd3bec5 into 4f858110874a462d9cf1070a90feb174(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:35,955 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:35,956 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/C, priority=13, startTime=1733325815782; duration=0sec 2024-12-04T15:23:35,956 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:35,956 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:C 2024-12-04T15:23:35,959 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/bd917a0195484716a512c575c09f3e6c, entries=150, sequenceid=74, filesize=11.7 K 2024-12-04T15:23:35,960 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 174ms, sequenceid=74, compaction requested=false 2024-12-04T15:23:35,960 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:36,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-04T15:23:36,057 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-04T15:23:36,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:36,057 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-04T15:23:36,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:36,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:36,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:36,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:36,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:36,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:36,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/d3e93c6b0560492da192fbcbbdc7a51f is 50, key is test_row_0/A:col10/1733325815807/Put/seqid=0 2024-12-04T15:23:36,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742379_1555 (size=12001) 2024-12-04T15:23:36,076 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/d3e93c6b0560492da192fbcbbdc7a51f 2024-12-04T15:23:36,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/bf278c7ea3e2419691224a651a682ad0 is 50, key is test_row_0/B:col10/1733325815807/Put/seqid=0 2024-12-04T15:23:36,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742380_1556 (size=12001) 2024-12-04T15:23:36,107 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), 
to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/bf278c7ea3e2419691224a651a682ad0 2024-12-04T15:23:36,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/0fbd26f4a1794c319fee3a5651ab0855 is 50, key is test_row_0/C:col10/1733325815807/Put/seqid=0 2024-12-04T15:23:36,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742381_1557 (size=12001) 2024-12-04T15:23:36,121 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/0fbd26f4a1794c319fee3a5651ab0855 2024-12-04T15:23:36,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/d3e93c6b0560492da192fbcbbdc7a51f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/d3e93c6b0560492da192fbcbbdc7a51f 2024-12-04T15:23:36,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
as already flushing 2024-12-04T15:23:36,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:36,133 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/d3e93c6b0560492da192fbcbbdc7a51f, entries=150, sequenceid=90, filesize=11.7 K 2024-12-04T15:23:36,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/bf278c7ea3e2419691224a651a682ad0 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/bf278c7ea3e2419691224a651a682ad0 2024-12-04T15:23:36,140 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/bf278c7ea3e2419691224a651a682ad0, entries=150, sequenceid=90, filesize=11.7 K 2024-12-04T15:23:36,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/0fbd26f4a1794c319fee3a5651ab0855 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/0fbd26f4a1794c319fee3a5651ab0855 2024-12-04T15:23:36,145 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/0fbd26f4a1794c319fee3a5651ab0855, entries=150, sequenceid=90, filesize=11.7 K 2024-12-04T15:23:36,146 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=60.38 KB/61830 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 89ms, sequenceid=90, compaction requested=true 2024-12-04T15:23:36,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:36,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:36,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-12-04T15:23:36,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-12-04T15:23:36,148 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-04T15:23:36,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:36,149 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-04T15:23:36,149 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 396 msec 2024-12-04T15:23:36,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:36,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:36,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:36,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:36,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:36,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:36,153 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 402 msec 2024-12-04T15:23:36,158 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/84d6a06e0ba44210bd3ab0d76f3bdd02 is 50, key is test_row_0/A:col10/1733325816138/Put/seqid=0 2024-12-04T15:23:36,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742382_1558 (size=14341) 2024-12-04T15:23:36,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/84d6a06e0ba44210bd3ab0d76f3bdd02 2024-12-04T15:23:36,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325876187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325876187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325876188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,209 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/80bbd91c1148461f97ac98664e5cca47 is 50, key is test_row_0/B:col10/1733325816138/Put/seqid=0 2024-12-04T15:23:36,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325876203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325876203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742383_1559 (size=12001) 2024-12-04T15:23:36,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325876305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325876305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325876305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325876311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325876314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-04T15:23:36,356 INFO [Thread-2375 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-04T15:23:36,360 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:23:36,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-12-04T15:23:36,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-04T15:23:36,362 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:23:36,363 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:23:36,363 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:23:36,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-04T15:23:36,515 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-04T15:23:36,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:36,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:36,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:36,516 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:36,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:36,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:36,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325876511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325876511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325876513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325876528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325876529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,634 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/80bbd91c1148461f97ac98664e5cca47 2024-12-04T15:23:36,645 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/f752113224a94908a978058c3714b403 is 50, key is test_row_0/C:col10/1733325816138/Put/seqid=0 2024-12-04T15:23:36,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742384_1560 (size=12001) 2024-12-04T15:23:36,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-04T15:23:36,668 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-04T15:23:36,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:36,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:36,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:36,669 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:36,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:36,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:36,823 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325876819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325876823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-04T15:23:36,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:36,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:36,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:36,826 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:36,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:36,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:36,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325876823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325876839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:36,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325876839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-04T15:23:36,980 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:36,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-04T15:23:36,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:36,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:36,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:36,981 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:36,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:36,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:37,061 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/f752113224a94908a978058c3714b403 2024-12-04T15:23:37,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/84d6a06e0ba44210bd3ab0d76f3bdd02 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/84d6a06e0ba44210bd3ab0d76f3bdd02 2024-12-04T15:23:37,095 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/84d6a06e0ba44210bd3ab0d76f3bdd02, entries=200, sequenceid=104, filesize=14.0 K 2024-12-04T15:23:37,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/80bbd91c1148461f97ac98664e5cca47 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/80bbd91c1148461f97ac98664e5cca47 2024-12-04T15:23:37,107 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/80bbd91c1148461f97ac98664e5cca47, entries=150, sequenceid=104, filesize=11.7 K 2024-12-04T15:23:37,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/f752113224a94908a978058c3714b403 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/f752113224a94908a978058c3714b403 2024-12-04T15:23:37,140 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:37,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-04T15:23:37,140 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:37,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:37,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:37,140 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:37,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:37,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:37,144 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/f752113224a94908a978058c3714b403, entries=150, sequenceid=104, filesize=11.7 K 2024-12-04T15:23:37,160 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 1012ms, sequenceid=104, compaction requested=true 2024-12-04T15:23:37,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:37,160 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:37,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:23:37,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:37,162 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:23:37,162 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:37,162 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:23:37,162 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:23:37,162 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50447 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:23:37,162 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/A is initiating minor compaction (all files) 2024-12-04T15:23:37,163 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/A in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:37,163 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/3dd2c2789bb34074a7a7710c77641552, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/38886d8c9a094966b8ed7fc3c6932aa8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/d3e93c6b0560492da192fbcbbdc7a51f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/84d6a06e0ba44210bd3ab0d76f3bdd02] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=49.3 K 2024-12-04T15:23:37,164 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:37,167 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3dd2c2789bb34074a7a7710c77641552, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733325812902 2024-12-04T15:23:37,167 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38886d8c9a094966b8ed7fc3c6932aa8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733325815146 2024-12-04T15:23:37,168 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3e93c6b0560492da192fbcbbdc7a51f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733325815804 2024-12-04T15:23:37,168 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84d6a06e0ba44210bd3ab0d76f3bdd02, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1733325816138 2024-12-04T15:23:37,188 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:23:37,188 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/B is initiating minor compaction (all files) 2024-12-04T15:23:37,188 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/B in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:37,188 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/bccb3dabeee24cc2bbf544439349d65a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/ae9d2811bc9c4bcea09db6f77aecd3d4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/bf278c7ea3e2419691224a651a682ad0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/80bbd91c1148461f97ac98664e5cca47] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=47.0 K 2024-12-04T15:23:37,200 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting bccb3dabeee24cc2bbf544439349d65a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733325812902 2024-12-04T15:23:37,208 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting ae9d2811bc9c4bcea09db6f77aecd3d4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733325815146 2024-12-04T15:23:37,213 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting bf278c7ea3e2419691224a651a682ad0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733325815804 2024-12-04T15:23:37,220 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 80bbd91c1148461f97ac98664e5cca47, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1733325816138 2024-12-04T15:23:37,234 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#A#compaction#474 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:37,234 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/b8a5624fdfa34e3a9389b3465dea4750 is 50, key is test_row_0/A:col10/1733325816138/Put/seqid=0 2024-12-04T15:23:37,290 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#B#compaction#475 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:37,291 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/8dc32097473e4a20a95aff467059942a is 50, key is test_row_0/B:col10/1733325816138/Put/seqid=0 2024-12-04T15:23:37,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742385_1561 (size=12241) 2024-12-04T15:23:37,300 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:37,300 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-04T15:23:37,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:37,301 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-04T15:23:37,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:37,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:37,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:37,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:37,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:37,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:37,309 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/b8a5624fdfa34e3a9389b3465dea4750 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/b8a5624fdfa34e3a9389b3465dea4750 2024-12-04T15:23:37,323 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/A of 1c46d02b12b7c26e3e20a64bcdd3bec5 into 
b8a5624fdfa34e3a9389b3465dea4750(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:37,323 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:37,323 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/A, priority=12, startTime=1733325817160; duration=0sec 2024-12-04T15:23:37,323 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:37,323 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:A 2024-12-04T15:23:37,323 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:37,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:37,335 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:37,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/0d5fa737059840068d007e4c3c77d7a6 is 50, key is test_row_0/A:col10/1733325816185/Put/seqid=0 2024-12-04T15:23:37,346 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:23:37,346 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/C is initiating minor compaction (all files) 2024-12-04T15:23:37,346 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/C in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:37,346 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/4f858110874a462d9cf1070a90feb174, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/bd917a0195484716a512c575c09f3e6c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/0fbd26f4a1794c319fee3a5651ab0855, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/f752113224a94908a978058c3714b403] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=47.0 K 2024-12-04T15:23:37,352 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f858110874a462d9cf1070a90feb174, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733325812902 2024-12-04T15:23:37,355 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd917a0195484716a512c575c09f3e6c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733325815146 2024-12-04T15:23:37,355 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0fbd26f4a1794c319fee3a5651ab0855, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733325815804 2024-12-04T15:23:37,355 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f752113224a94908a978058c3714b403, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1733325816138 2024-12-04T15:23:37,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742386_1562 (size=12241) 2024-12-04T15:23:37,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:37,384 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:37,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325877370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:37,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325877370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:37,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:37,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:37,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325877370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:37,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325877372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:37,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:37,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325877384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:37,387 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#C#compaction#477 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:37,387 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/d4e05164b6414433a3ccb73da6ae7dcb is 50, key is test_row_0/C:col10/1733325816138/Put/seqid=0 2024-12-04T15:23:37,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742387_1563 (size=12001) 2024-12-04T15:23:37,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742388_1564 (size=12241) 2024-12-04T15:23:37,433 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/d4e05164b6414433a3ccb73da6ae7dcb as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/d4e05164b6414433a3ccb73da6ae7dcb 2024-12-04T15:23:37,446 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/C of 1c46d02b12b7c26e3e20a64bcdd3bec5 into d4e05164b6414433a3ccb73da6ae7dcb(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:37,446 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:37,446 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/C, priority=12, startTime=1733325817162; duration=0sec 2024-12-04T15:23:37,446 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:37,446 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:C 2024-12-04T15:23:37,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-04T15:23:37,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325877486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:37,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:37,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325877488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325877488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:37,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:37,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325877489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:37,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:37,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325877492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:37,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:37,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325877700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:37,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:37,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:37,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325877701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:37,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325877700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:37,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:37,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325877701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:37,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:37,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325877701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:37,816 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/0d5fa737059840068d007e4c3c77d7a6 2024-12-04T15:23:37,829 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/8dc32097473e4a20a95aff467059942a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/8dc32097473e4a20a95aff467059942a 2024-12-04T15:23:37,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/1917a55981f54404b2611056b1c2cfd3 is 50, key is test_row_0/B:col10/1733325816185/Put/seqid=0 2024-12-04T15:23:37,837 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/B of 1c46d02b12b7c26e3e20a64bcdd3bec5 into 8dc32097473e4a20a95aff467059942a(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:23:37,837 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:37,837 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/B, priority=12, startTime=1733325817161; duration=0sec 2024-12-04T15:23:37,837 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:37,837 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:B 2024-12-04T15:23:37,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742389_1565 (size=12001) 2024-12-04T15:23:38,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325878009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325878009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325878010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,017 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325878011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325878011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,295 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/1917a55981f54404b2611056b1c2cfd3 2024-12-04T15:23:38,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/e786ea9140fb4f88bc1ffa2ed532f896 is 50, key is test_row_0/C:col10/1733325816185/Put/seqid=0 2024-12-04T15:23:38,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742390_1566 (size=12001) 2024-12-04T15:23:38,360 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/e786ea9140fb4f88bc1ffa2ed532f896 2024-12-04T15:23:38,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/0d5fa737059840068d007e4c3c77d7a6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/0d5fa737059840068d007e4c3c77d7a6 2024-12-04T15:23:38,390 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/0d5fa737059840068d007e4c3c77d7a6, entries=150, sequenceid=127, filesize=11.7 K 2024-12-04T15:23:38,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/1917a55981f54404b2611056b1c2cfd3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/1917a55981f54404b2611056b1c2cfd3 2024-12-04T15:23:38,395 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/1917a55981f54404b2611056b1c2cfd3, entries=150, sequenceid=127, filesize=11.7 K 2024-12-04T15:23:38,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/e786ea9140fb4f88bc1ffa2ed532f896 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/e786ea9140fb4f88bc1ffa2ed532f896 2024-12-04T15:23:38,400 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/e786ea9140fb4f88bc1ffa2ed532f896, entries=150, sequenceid=127, filesize=11.7 K 2024-12-04T15:23:38,401 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 1100ms, sequenceid=127, compaction requested=false 2024-12-04T15:23:38,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:38,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:38,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 
2024-12-04T15:23:38,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=129 
2024-12-04T15:23:38,405 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 
2024-12-04T15:23:38,405 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0390 sec 
2024-12-04T15:23:38,407 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 2.0470 sec 
2024-12-04T15:23:38,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 
2024-12-04T15:23:38,482 INFO [Thread-2375 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 
2024-12-04T15:23:38,497 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 
2024-12-04T15:23:38,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 
2024-12-04T15:23:38,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 
2024-12-04T15:23:38,508 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-12-04T15:23:38,508 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-12-04T15:23:38,508 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-12-04T15:23:38,523 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 
2024-12-04T15:23:38,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 
2024-12-04T15:23:38,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-04T15:23:38,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 
2024-12-04T15:23:38,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-04T15:23:38,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 
2024-12-04T15:23:38,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:38,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:38,537 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/2b33986f65ab4d1d9ce1dbe395e7c557 is 50, key is test_row_0/A:col10/1733325818521/Put/seqid=0 2024-12-04T15:23:38,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742391_1567 (size=14541) 2024-12-04T15:23:38,583 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325878572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325878580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325878574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325878583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325878582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-04T15:23:38,660 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-04T15:23:38,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:38,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:38,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:38,661 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:38,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:38,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:38,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325878692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325878693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325878694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325878694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325878700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-04T15:23:38,826 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-04T15:23:38,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:38,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:38,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:38,832 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:38,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:38,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:38,917 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325878905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325878905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325878905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325878906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:38,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325878921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:38,972 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=144 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/2b33986f65ab4d1d9ce1dbe395e7c557 2024-12-04T15:23:39,001 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,002 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-04T15:23:39,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:39,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:39,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:39,002 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/fcd3617d70944a328a653930a1007b45 is 50, key is test_row_0/B:col10/1733325818521/Put/seqid=0 2024-12-04T15:23:39,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742392_1568 (size=12151) 2024-12-04T15:23:39,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-04T15:23:39,160 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,162 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-04T15:23:39,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:39,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:39,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:39,162 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:39,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325879220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:39,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325879220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:39,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325879224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:39,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325879229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:39,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325879240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,324 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,333 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-04T15:23:39,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:39,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:39,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:39,334 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:39,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,451 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=144 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/fcd3617d70944a328a653930a1007b45 2024-12-04T15:23:39,496 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-04T15:23:39,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:39,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:39,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:39,500 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:39,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/ad5bacbb79cd49e0809bf6828d44f929 is 50, key is test_row_0/C:col10/1733325818521/Put/seqid=0 2024-12-04T15:23:39,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742393_1569 (size=12151) 2024-12-04T15:23:39,569 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=144 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/ad5bacbb79cd49e0809bf6828d44f929 2024-12-04T15:23:39,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/2b33986f65ab4d1d9ce1dbe395e7c557 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2b33986f65ab4d1d9ce1dbe395e7c557 2024-12-04T15:23:39,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-04T15:23:39,656 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2b33986f65ab4d1d9ce1dbe395e7c557, entries=200, sequenceid=144, filesize=14.2 K 2024-12-04T15:23:39,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/fcd3617d70944a328a653930a1007b45 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/fcd3617d70944a328a653930a1007b45 2024-12-04T15:23:39,666 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-04T15:23:39,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:39,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:39,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:39,667 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,684 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/fcd3617d70944a328a653930a1007b45, entries=150, sequenceid=144, filesize=11.9 K 2024-12-04T15:23:39,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/ad5bacbb79cd49e0809bf6828d44f929 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/ad5bacbb79cd49e0809bf6828d44f929 2024-12-04T15:23:39,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/ad5bacbb79cd49e0809bf6828d44f929, entries=150, sequenceid=144, filesize=11.9 K 2024-12-04T15:23:39,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 1201ms, sequenceid=144, compaction requested=true 2024-12-04T15:23:39,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:39,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:23:39,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:39,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:23:39,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:39,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:23:39,724 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:39,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:23:39,724 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:39,725 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38783 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:39,725 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/A is initiating minor compaction (all files) 2024-12-04T15:23:39,725 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:39,725 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/A in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:39,725 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/B is initiating minor compaction (all files) 2024-12-04T15:23:39,725 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/B in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:39,725 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/b8a5624fdfa34e3a9389b3465dea4750, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/0d5fa737059840068d007e4c3c77d7a6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2b33986f65ab4d1d9ce1dbe395e7c557] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=37.9 K 2024-12-04T15:23:39,726 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/8dc32097473e4a20a95aff467059942a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/1917a55981f54404b2611056b1c2cfd3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/fcd3617d70944a328a653930a1007b45] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=35.5 K 2024-12-04T15:23:39,726 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8a5624fdfa34e3a9389b3465dea4750, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1733325816138 2024-12-04T15:23:39,726 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 8dc32097473e4a20a95aff467059942a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1733325816138 2024-12-04T15:23:39,732 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1917a55981f54404b2611056b1c2cfd3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733325816185 2024-12-04T15:23:39,732 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting fcd3617d70944a328a653930a1007b45, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1733325817368 2024-12-04T15:23:39,736 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d5fa737059840068d007e4c3c77d7a6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733325816185 2024-12-04T15:23:39,736 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b33986f65ab4d1d9ce1dbe395e7c557, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1733325817368 2024-12-04T15:23:39,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:39,750 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-04T15:23:39,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:39,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:39,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:39,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:39,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:39,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:39,758 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#B#compaction#483 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:39,758 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/b2e2e785fe364c3bad54099c0c0e669f is 50, key is test_row_0/B:col10/1733325818521/Put/seqid=0 2024-12-04T15:23:39,763 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#A#compaction#484 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:39,763 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/a28b9152f1f04392a7b5fc2aa04623f0 is 50, key is test_row_0/A:col10/1733325818521/Put/seqid=0 2024-12-04T15:23:39,786 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/419c19a5805044429af568f1cec839e3 is 50, key is test_row_0/A:col10/1733325818582/Put/seqid=0 2024-12-04T15:23:39,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:39,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325879783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:39,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325879784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:39,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325879793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:39,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325879798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:39,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325879800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,820 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-04T15:23:39,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:39,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:39,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:39,828 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742394_1570 (size=12493) 2024-12-04T15:23:39,845 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/b2e2e785fe364c3bad54099c0c0e669f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/b2e2e785fe364c3bad54099c0c0e669f 2024-12-04T15:23:39,849 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/B of 1c46d02b12b7c26e3e20a64bcdd3bec5 into b2e2e785fe364c3bad54099c0c0e669f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:23:39,850 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:39,850 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/B, priority=13, startTime=1733325819724; duration=0sec 2024-12-04T15:23:39,850 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:39,850 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:B 2024-12-04T15:23:39,850 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:39,851 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:39,851 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/C is initiating minor compaction (all files) 2024-12-04T15:23:39,851 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/C in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:39,851 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/d4e05164b6414433a3ccb73da6ae7dcb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/e786ea9140fb4f88bc1ffa2ed532f896, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/ad5bacbb79cd49e0809bf6828d44f929] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=35.5 K 2024-12-04T15:23:39,851 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting d4e05164b6414433a3ccb73da6ae7dcb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1733325816138 2024-12-04T15:23:39,851 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting e786ea9140fb4f88bc1ffa2ed532f896, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733325816185 2024-12-04T15:23:39,852 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting ad5bacbb79cd49e0809bf6828d44f929, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1733325817368 2024-12-04T15:23:39,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 
is added to blk_1073742395_1571 (size=12493) 2024-12-04T15:23:39,873 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#C#compaction#486 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:39,874 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/d980863a7b404f8a947baf0e7e2b90a8 is 50, key is test_row_0/C:col10/1733325818521/Put/seqid=0 2024-12-04T15:23:39,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742396_1572 (size=14541) 2024-12-04T15:23:39,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:39,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325879900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742397_1573 (size=12493) 2024-12-04T15:23:39,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:39,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325879908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,936 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/d980863a7b404f8a947baf0e7e2b90a8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/d980863a7b404f8a947baf0e7e2b90a8 2024-12-04T15:23:39,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:39,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325879925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:39,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325879924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:39,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325879936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,945 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/C of 1c46d02b12b7c26e3e20a64bcdd3bec5 into d980863a7b404f8a947baf0e7e2b90a8(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:39,946 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:39,946 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/C, priority=13, startTime=1733325819724; duration=0sec 2024-12-04T15:23:39,946 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:39,946 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:C 2024-12-04T15:23:39,983 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:39,993 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-04T15:23:39,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:39,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
as already flushing 2024-12-04T15:23:39,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:39,993 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:39,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:40,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:40,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325880115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:40,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:40,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325880128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:40,148 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:40,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:40,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325880144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:40,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:40,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325880144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:40,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-04T15:23:40,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:40,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:40,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:40,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:40,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:40,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:40,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325880148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:40,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:40,273 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/a28b9152f1f04392a7b5fc2aa04623f0 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/a28b9152f1f04392a7b5fc2aa04623f0 2024-12-04T15:23:40,296 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/A of 1c46d02b12b7c26e3e20a64bcdd3bec5 into a28b9152f1f04392a7b5fc2aa04623f0(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:40,297 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:40,297 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/A, priority=13, startTime=1733325819724; duration=0sec 2024-12-04T15:23:40,297 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:40,297 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:A 2024-12-04T15:23:40,302 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/419c19a5805044429af568f1cec839e3 2024-12-04T15:23:40,314 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:40,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-04T15:23:40,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:40,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:40,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:40,315 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:40,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:40,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:40,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/2727f7e8601843b78877e9dc27064073 is 50, key is test_row_0/B:col10/1733325818582/Put/seqid=0 2024-12-04T15:23:40,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742398_1574 (size=12151) 2024-12-04T15:23:40,406 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/2727f7e8601843b78877e9dc27064073 2024-12-04T15:23:40,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:40,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325880419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:40,426 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/a794cec60ad24328af81d62c3a0f0309 is 50, key is test_row_0/C:col10/1733325818582/Put/seqid=0 2024-12-04T15:23:40,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:40,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325880442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:40,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742399_1575 (size=12151) 2024-12-04T15:23:40,451 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/a794cec60ad24328af81d62c3a0f0309 2024-12-04T15:23:40,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:40,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325880453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:40,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:40,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325880455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:40,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:40,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325880457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:40,467 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:40,468 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-04T15:23:40,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:40,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:40,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:40,468 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:40,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:40,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:40,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/419c19a5805044429af568f1cec839e3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/419c19a5805044429af568f1cec839e3 2024-12-04T15:23:40,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/419c19a5805044429af568f1cec839e3, entries=200, sequenceid=168, filesize=14.2 K 2024-12-04T15:23:40,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/2727f7e8601843b78877e9dc27064073 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/2727f7e8601843b78877e9dc27064073 2024-12-04T15:23:40,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/2727f7e8601843b78877e9dc27064073, entries=150, sequenceid=168, filesize=11.9 K 2024-12-04T15:23:40,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/a794cec60ad24328af81d62c3a0f0309 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/a794cec60ad24328af81d62c3a0f0309 2024-12-04T15:23:40,490 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/a794cec60ad24328af81d62c3a0f0309, entries=150, sequenceid=168, filesize=11.9 K 2024-12-04T15:23:40,494 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 744ms, sequenceid=168, compaction requested=false 2024-12-04T15:23:40,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:40,624 DEBUG 
[RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:40,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-04T15:23:40,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:40,630 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-04T15:23:40,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:40,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:40,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:40,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:40,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:40,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:40,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-04T15:23:40,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/8c050425b22d47d9b42fe684e63d1b91 is 50, key is test_row_0/A:col10/1733325819796/Put/seqid=0 2024-12-04T15:23:40,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742400_1576 (size=12151) 2024-12-04T15:23:40,709 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/8c050425b22d47d9b42fe684e63d1b91 2024-12-04T15:23:40,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/a1712c60bae0483b824f59031ca5180c is 50, key is test_row_0/B:col10/1733325819796/Put/seqid=0 2024-12-04T15:23:40,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742401_1577 (size=12151) 2024-12-04T15:23:40,760 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/a1712c60bae0483b824f59031ca5180c 2024-12-04T15:23:40,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/e5d477522f144b3581aa1fd01a37d28f is 50, key is test_row_0/C:col10/1733325819796/Put/seqid=0 2024-12-04T15:23:40,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742402_1578 (size=12151) 2024-12-04T15:23:40,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:40,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:41,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325881029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325881031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325881049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325881051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325881045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325881153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325881164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325881172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325881176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325881176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,266 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/e5d477522f144b3581aa1fd01a37d28f 2024-12-04T15:23:41,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/8c050425b22d47d9b42fe684e63d1b91 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/8c050425b22d47d9b42fe684e63d1b91 2024-12-04T15:23:41,337 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/8c050425b22d47d9b42fe684e63d1b91, entries=150, sequenceid=183, filesize=11.9 K 2024-12-04T15:23:41,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/a1712c60bae0483b824f59031ca5180c as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/a1712c60bae0483b824f59031ca5180c 2024-12-04T15:23:41,353 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/a1712c60bae0483b824f59031ca5180c, entries=150, sequenceid=183, filesize=11.9 K 2024-12-04T15:23:41,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/e5d477522f144b3581aa1fd01a37d28f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/e5d477522f144b3581aa1fd01a37d28f 2024-12-04T15:23:41,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325881372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,380 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/e5d477522f144b3581aa1fd01a37d28f, entries=150, sequenceid=183, filesize=11.9 K 2024-12-04T15:23:41,383 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 754ms, sequenceid=183, compaction requested=true 2024-12-04T15:23:41,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:41,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:41,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-12-04T15:23:41,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-12-04T15:23:41,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:41,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-04T15:23:41,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:41,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:41,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:41,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:41,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:41,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:41,403 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-04T15:23:41,403 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8910 sec 2024-12-04T15:23:41,405 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 2.9070 sec 2024-12-04T15:23:41,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325881408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/2f203e60ee5d463bbaff836f70cd8559 is 50, key is test_row_0/A:col10/1733325821009/Put/seqid=0 2024-12-04T15:23:41,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325881417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325881408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325881421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742403_1579 (size=12151) 2024-12-04T15:23:41,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325881528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325881528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325881531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,547 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325881540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325881688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,748 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325881740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325881740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325881741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:41,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325881754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:41,883 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/2f203e60ee5d463bbaff836f70cd8559 2024-12-04T15:23:41,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/1259db9c467444b280212fae0c892c05 is 50, key is test_row_0/B:col10/1733325821009/Put/seqid=0 2024-12-04T15:23:42,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742404_1580 (size=12151) 2024-12-04T15:23:42,048 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/1259db9c467444b280212fae0c892c05 2024-12-04T15:23:42,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:42,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325882053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:42,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:42,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325882053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:42,068 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:42,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325882053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:42,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:42,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325882064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:42,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/479b9e4ea5534e748cc7f72f7df337c4 is 50, key is test_row_0/C:col10/1733325821009/Put/seqid=0 2024-12-04T15:23:42,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742405_1581 (size=12151) 2024-12-04T15:23:42,149 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/479b9e4ea5534e748cc7f72f7df337c4 2024-12-04T15:23:42,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/2f203e60ee5d463bbaff836f70cd8559 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2f203e60ee5d463bbaff836f70cd8559 2024-12-04T15:23:42,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:42,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325882200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:42,232 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2f203e60ee5d463bbaff836f70cd8559, entries=150, sequenceid=210, filesize=11.9 K 2024-12-04T15:23:42,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/1259db9c467444b280212fae0c892c05 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/1259db9c467444b280212fae0c892c05 2024-12-04T15:23:42,287 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/1259db9c467444b280212fae0c892c05, entries=150, sequenceid=210, filesize=11.9 K 2024-12-04T15:23:42,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/479b9e4ea5534e748cc7f72f7df337c4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/479b9e4ea5534e748cc7f72f7df337c4 2024-12-04T15:23:42,328 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/479b9e4ea5534e748cc7f72f7df337c4, entries=150, sequenceid=210, filesize=11.9 K 2024-12-04T15:23:42,329 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 936ms, sequenceid=210, compaction requested=true 2024-12-04T15:23:42,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:42,330 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:42,330 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:23:42,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:42,330 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:42,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:23:42,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:42,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:23:42,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:42,342 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51336 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:23:42,342 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/A is initiating minor compaction (all files) 2024-12-04T15:23:42,342 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/A in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:42,342 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/a28b9152f1f04392a7b5fc2aa04623f0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/419c19a5805044429af568f1cec839e3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/8c050425b22d47d9b42fe684e63d1b91, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2f203e60ee5d463bbaff836f70cd8559] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=50.1 K 2024-12-04T15:23:42,343 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a28b9152f1f04392a7b5fc2aa04623f0, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1733325817368 2024-12-04T15:23:42,343 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 419c19a5805044429af568f1cec839e3, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733325818572 2024-12-04T15:23:42,344 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c050425b22d47d9b42fe684e63d1b91, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733325819771 2024-12-04T15:23:42,344 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f203e60ee5d463bbaff836f70cd8559, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733325821009 2024-12-04T15:23:42,348 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:23:42,348 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/B is initiating minor compaction (all files) 2024-12-04T15:23:42,348 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/B in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:42,348 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/b2e2e785fe364c3bad54099c0c0e669f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/2727f7e8601843b78877e9dc27064073, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/a1712c60bae0483b824f59031ca5180c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/1259db9c467444b280212fae0c892c05] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=47.8 K 2024-12-04T15:23:42,352 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting b2e2e785fe364c3bad54099c0c0e669f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1733325817368 2024-12-04T15:23:42,358 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#A#compaction#495 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:42,359 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/bfe3420fb0e34372b9cd80aa2ba0bbfc is 50, key is test_row_0/A:col10/1733325821009/Put/seqid=0 2024-12-04T15:23:42,362 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 2727f7e8601843b78877e9dc27064073, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733325818582 2024-12-04T15:23:42,363 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting a1712c60bae0483b824f59031ca5180c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733325819771 2024-12-04T15:23:42,368 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1259db9c467444b280212fae0c892c05, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733325821009 2024-12-04T15:23:42,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742406_1582 (size=12629) 2024-12-04T15:23:42,466 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#B#compaction#496 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:42,467 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/ff21cac52b964890abd35f1ea3c68468 is 50, key is test_row_0/B:col10/1733325821009/Put/seqid=0 2024-12-04T15:23:42,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742407_1583 (size=12629) 2024-12-04T15:23:42,548 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/ff21cac52b964890abd35f1ea3c68468 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/ff21cac52b964890abd35f1ea3c68468 2024-12-04T15:23:42,583 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/B of 1c46d02b12b7c26e3e20a64bcdd3bec5 into ff21cac52b964890abd35f1ea3c68468(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:42,583 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:42,583 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/B, priority=12, startTime=1733325822330; duration=0sec 2024-12-04T15:23:42,583 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:42,583 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:B 2024-12-04T15:23:42,583 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:42,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:42,586 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-04T15:23:42,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:42,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:42,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:42,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:42,588 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:42,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:42,594 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:23:42,594 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/C is initiating minor compaction (all files) 2024-12-04T15:23:42,594 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/C in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:42,594 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/d980863a7b404f8a947baf0e7e2b90a8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/a794cec60ad24328af81d62c3a0f0309, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/e5d477522f144b3581aa1fd01a37d28f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/479b9e4ea5534e748cc7f72f7df337c4] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=47.8 K 2024-12-04T15:23:42,600 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting d980863a7b404f8a947baf0e7e2b90a8, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1733325817368 2024-12-04T15:23:42,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/db8566b6a1a04f7a8364ada033a85895 is 50, key is test_row_0/A:col10/1733325821416/Put/seqid=0 2024-12-04T15:23:42,604 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting a794cec60ad24328af81d62c3a0f0309, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733325818582 2024-12-04T15:23:42,608 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting e5d477522f144b3581aa1fd01a37d28f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733325819771 2024-12-04T15:23:42,612 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 479b9e4ea5534e748cc7f72f7df337c4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733325821009 2024-12-04T15:23:42,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=130 2024-12-04T15:23:42,634 INFO [Thread-2375 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-04T15:23:42,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:23:42,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-12-04T15:23:42,640 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:23:42,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742408_1584 (size=19321) 2024-12-04T15:23:42,641 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:23:42,641 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:23:42,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-04T15:23:42,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=224 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/db8566b6a1a04f7a8364ada033a85895 2024-12-04T15:23:42,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/7e644bb115674cf391f0047d260c5713 is 50, key is test_row_0/B:col10/1733325821416/Put/seqid=0 2024-12-04T15:23:42,699 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#C#compaction#499 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:42,699 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/31dddba7cd47478f89c30b267e27908d is 50, key is test_row_0/C:col10/1733325821009/Put/seqid=0 2024-12-04T15:23:42,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:42,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:42,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325882685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:42,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325882687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:42,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:42,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325882689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:42,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:42,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325882696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:42,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-04T15:23:42,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742410_1586 (size=12629) 2024-12-04T15:23:42,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742409_1585 (size=12151) 2024-12-04T15:23:42,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=224 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/7e644bb115674cf391f0047d260c5713 2024-12-04T15:23:42,777 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/31dddba7cd47478f89c30b267e27908d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/31dddba7cd47478f89c30b267e27908d 2024-12-04T15:23:42,783 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/ff1475c1c5a64d8aa313eb28cf02b1b6 is 50, key is test_row_0/C:col10/1733325821416/Put/seqid=0 2024-12-04T15:23:42,799 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin 
connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:42,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-04T15:23:42,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:42,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:42,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:42,803 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:42,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:42,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:42,813 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/C of 1c46d02b12b7c26e3e20a64bcdd3bec5 into 31dddba7cd47478f89c30b267e27908d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:23:42,813 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:42,813 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/C, priority=12, startTime=1733325822330; duration=0sec 2024-12-04T15:23:42,813 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:42,813 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:C 2024-12-04T15:23:42,817 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/bfe3420fb0e34372b9cd80aa2ba0bbfc as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/bfe3420fb0e34372b9cd80aa2ba0bbfc 2024-12-04T15:23:42,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:42,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325882812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:42,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:42,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325882813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:42,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:42,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325882813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:42,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:42,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325882814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:42,825 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/A of 1c46d02b12b7c26e3e20a64bcdd3bec5 into bfe3420fb0e34372b9cd80aa2ba0bbfc(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:23:42,826 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:42,826 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/A, priority=12, startTime=1733325822329; duration=0sec 2024-12-04T15:23:42,826 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:42,826 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:A 2024-12-04T15:23:42,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742411_1587 (size=12151) 2024-12-04T15:23:42,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-04T15:23:42,968 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:42,969 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-04T15:23:42,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:42,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:42,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:42,969 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:42,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:42,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:43,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:43,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325883023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:43,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325883029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:43,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325883032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:43,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325883032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,129 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,130 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-04T15:23:43,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:43,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:43,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:43,131 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:43,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:43,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:43,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:43,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325883225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,237 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=224 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/ff1475c1c5a64d8aa313eb28cf02b1b6 2024-12-04T15:23:43,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/db8566b6a1a04f7a8364ada033a85895 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/db8566b6a1a04f7a8364ada033a85895 2024-12-04T15:23:43,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=132 2024-12-04T15:23:43,245 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/db8566b6a1a04f7a8364ada033a85895, entries=300, sequenceid=224, filesize=18.9 K 2024-12-04T15:23:43,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/7e644bb115674cf391f0047d260c5713 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/7e644bb115674cf391f0047d260c5713 2024-12-04T15:23:43,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/7e644bb115674cf391f0047d260c5713, entries=150, sequenceid=224, filesize=11.9 K 2024-12-04T15:23:43,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/ff1475c1c5a64d8aa313eb28cf02b1b6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/ff1475c1c5a64d8aa313eb28cf02b1b6 2024-12-04T15:23:43,264 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/ff1475c1c5a64d8aa313eb28cf02b1b6, entries=150, sequenceid=224, filesize=11.9 K 2024-12-04T15:23:43,265 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 679ms, sequenceid=224, compaction requested=false 2024-12-04T15:23:43,265 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:43,288 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-04T15:23:43,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:43,292 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-04T15:23:43,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:43,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:43,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:43,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:43,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:43,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:43,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/97565dfedb7642cba5547982827bc463 is 50, key is test_row_0/A:col10/1733325822680/Put/seqid=0 2024-12-04T15:23:43,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:43,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:43,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742412_1588 (size=12151) 2024-12-04T15:23:43,382 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/97565dfedb7642cba5547982827bc463 2024-12-04T15:23:43,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:43,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325883372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:43,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325883378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:43,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325883385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:43,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325883385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/dc7448663a4741769174325ea0b242a0 is 50, key is test_row_0/B:col10/1733325822680/Put/seqid=0 2024-12-04T15:23:43,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:43,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325883496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:43,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325883496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:43,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325883504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:43,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325883506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742413_1589 (size=12151) 2024-12-04T15:23:43,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:43,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325883706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:43,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325883714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:43,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325883719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:43,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325883725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:43,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-04T15:23:43,924 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/dc7448663a4741769174325ea0b242a0 2024-12-04T15:23:43,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/bbebe2aeff7e40c79c37c495b82a5dfd is 50, key is test_row_0/C:col10/1733325822680/Put/seqid=0 2024-12-04T15:23:44,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:44,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325884020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:44,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742414_1590 (size=12151) 2024-12-04T15:23:44,034 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/bbebe2aeff7e40c79c37c495b82a5dfd 2024-12-04T15:23:44,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:44,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325884028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:44,044 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:44,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325884033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:44,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:44,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325884043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:44,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/97565dfedb7642cba5547982827bc463 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/97565dfedb7642cba5547982827bc463 2024-12-04T15:23:44,094 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/97565dfedb7642cba5547982827bc463, entries=150, sequenceid=249, filesize=11.9 K 2024-12-04T15:23:44,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/dc7448663a4741769174325ea0b242a0 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/dc7448663a4741769174325ea0b242a0 2024-12-04T15:23:44,115 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/dc7448663a4741769174325ea0b242a0, entries=150, sequenceid=249, filesize=11.9 K 2024-12-04T15:23:44,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/bbebe2aeff7e40c79c37c495b82a5dfd as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/bbebe2aeff7e40c79c37c495b82a5dfd 2024-12-04T15:23:44,128 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/bbebe2aeff7e40c79c37c495b82a5dfd, entries=150, sequenceid=249, filesize=11.9 K 2024-12-04T15:23:44,136 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 837ms, sequenceid=249, compaction requested=true 2024-12-04T15:23:44,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:44,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:44,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-12-04T15:23:44,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-12-04T15:23:44,146 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-04T15:23:44,146 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5030 sec 2024-12-04T15:23:44,149 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.5090 sec 2024-12-04T15:23:44,544 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-04T15:23:44,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:44,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:44,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:44,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:44,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:44,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-04T15:23:44,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:44,565 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/dd65a23ceaa54a7db6fd93addae7ca60 is 50, key is test_row_0/A:col10/1733325823369/Put/seqid=0 2024-12-04T15:23:44,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742415_1591 (size=12301) 2024-12-04T15:23:44,605 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=264 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/dd65a23ceaa54a7db6fd93addae7ca60 2024-12-04T15:23:44,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:44,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325884611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:44,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:44,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325884612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:44,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:44,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325884618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:44,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:44,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325884628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:44,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/fe2cffd7d3764430bdc1cad7518b6526 is 50, key is test_row_0/B:col10/1733325823369/Put/seqid=0 2024-12-04T15:23:44,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742416_1592 (size=12301) 2024-12-04T15:23:44,711 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=264 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/fe2cffd7d3764430bdc1cad7518b6526 2024-12-04T15:23:44,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:44,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325884727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:44,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:44,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325884728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:44,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:44,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325884727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:44,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-04T15:23:44,749 INFO [Thread-2375 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-12-04T15:23:44,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:44,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325884744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:44,759 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:23:44,763 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/05941c8956fd4ebd99a61849e9ebce87 is 50, key is test_row_0/C:col10/1733325823369/Put/seqid=0 2024-12-04T15:23:44,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-04T15:23:44,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-04T15:23:44,772 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:23:44,776 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:23:44,776 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:23:44,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742417_1593 (size=12301) 2024-12-04T15:23:44,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-04T15:23:44,928 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:44,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-04T15:23:44,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:44,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:44,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:44,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:44,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:44,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:44,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:44,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325884949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:44,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:44,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325884949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:44,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:44,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325884949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:44,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:44,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325884980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:45,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-04T15:23:45,093 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:45,099 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-04T15:23:45,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:45,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:45,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:45,099 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:45,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:45,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:45,215 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=264 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/05941c8956fd4ebd99a61849e9ebce87 2024-12-04T15:23:45,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:45,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325885235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:45,244 DEBUG [Thread-2373 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4215 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., hostname=645c2dbfef2e,42169,1733325683856, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:23:45,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:45,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325885254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:45,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:45,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325885254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:45,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:45,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325885254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:45,272 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:45,274 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-04T15:23:45,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:45,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:45,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:45,280 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:45,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:45,284 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/dd65a23ceaa54a7db6fd93addae7ca60 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/dd65a23ceaa54a7db6fd93addae7ca60 2024-12-04T15:23:45,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:45,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:45,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325885299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:45,316 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/dd65a23ceaa54a7db6fd93addae7ca60, entries=150, sequenceid=264, filesize=12.0 K 2024-12-04T15:23:45,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/fe2cffd7d3764430bdc1cad7518b6526 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/fe2cffd7d3764430bdc1cad7518b6526 2024-12-04T15:23:45,361 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/fe2cffd7d3764430bdc1cad7518b6526, entries=150, sequenceid=264, filesize=12.0 K 2024-12-04T15:23:45,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/05941c8956fd4ebd99a61849e9ebce87 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/05941c8956fd4ebd99a61849e9ebce87 2024-12-04T15:23:45,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-04T15:23:45,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/05941c8956fd4ebd99a61849e9ebce87, entries=150, sequenceid=264, filesize=12.0 K 2024-12-04T15:23:45,401 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 857ms, sequenceid=264, compaction requested=true 2024-12-04T15:23:45,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:45,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:23:45,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:45,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:23:45,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:23:45,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:23:45,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-04T15:23:45,402 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:45,404 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:45,413 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 56402 starting at candidate #0 after considering 3 permutations with 3 in 
ratio 2024-12-04T15:23:45,413 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/A is initiating minor compaction (all files) 2024-12-04T15:23:45,413 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/A in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:45,413 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/bfe3420fb0e34372b9cd80aa2ba0bbfc, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/db8566b6a1a04f7a8364ada033a85895, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/97565dfedb7642cba5547982827bc463, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/dd65a23ceaa54a7db6fd93addae7ca60] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=55.1 K 2024-12-04T15:23:45,414 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49232 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:23:45,414 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/C is initiating minor compaction (all files) 2024-12-04T15:23:45,414 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/C in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:45,414 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/31dddba7cd47478f89c30b267e27908d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/ff1475c1c5a64d8aa313eb28cf02b1b6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/bbebe2aeff7e40c79c37c495b82a5dfd, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/05941c8956fd4ebd99a61849e9ebce87] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=48.1 K 2024-12-04T15:23:45,414 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting bfe3420fb0e34372b9cd80aa2ba0bbfc, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733325821009 2024-12-04T15:23:45,416 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 31dddba7cd47478f89c30b267e27908d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733325821009 2024-12-04T15:23:45,416 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting db8566b6a1a04f7a8364ada033a85895, keycount=300, bloomtype=ROW, size=18.9 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1733325821416 2024-12-04T15:23:45,416 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97565dfedb7642cba5547982827bc463, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733325822679 2024-12-04T15:23:45,416 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting ff1475c1c5a64d8aa313eb28cf02b1b6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1733325821416 2024-12-04T15:23:45,420 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd65a23ceaa54a7db6fd93addae7ca60, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1733325823368 2024-12-04T15:23:45,420 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting bbebe2aeff7e40c79c37c495b82a5dfd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733325822679 2024-12-04T15:23:45,421 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 05941c8956fd4ebd99a61849e9ebce87, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1733325823368 2024-12-04T15:23:45,440 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:45,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 
2024-12-04T15:23:45,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:45,441 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-04T15:23:45,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:45,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:45,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:45,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:45,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:45,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:45,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/413f22f22a5a41ca86fb048bc6a11117 is 50, key is test_row_0/A:col10/1733325824608/Put/seqid=0 2024-12-04T15:23:45,463 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#A#compaction#508 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:45,464 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/d6a90098d1f84132b41b690bdb0fda3d is 50, key is test_row_0/A:col10/1733325823369/Put/seqid=0 2024-12-04T15:23:45,482 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#C#compaction#509 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:45,483 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/5596662f92fc4876b52195b5c2e265f3 is 50, key is test_row_0/C:col10/1733325823369/Put/seqid=0 2024-12-04T15:23:45,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742418_1594 (size=12301) 2024-12-04T15:23:45,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742419_1595 (size=12915) 2024-12-04T15:23:45,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742420_1596 (size=12915) 2024-12-04T15:23:45,584 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/d6a90098d1f84132b41b690bdb0fda3d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/d6a90098d1f84132b41b690bdb0fda3d 2024-12-04T15:23:45,608 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/A of 1c46d02b12b7c26e3e20a64bcdd3bec5 into d6a90098d1f84132b41b690bdb0fda3d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:23:45,608 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:45,608 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/A, priority=12, startTime=1733325825401; duration=0sec 2024-12-04T15:23:45,609 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:45,609 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:A 2024-12-04T15:23:45,609 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:23:45,619 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49232 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:23:45,619 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/B is initiating minor compaction (all files) 2024-12-04T15:23:45,619 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/B in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:45,619 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/ff21cac52b964890abd35f1ea3c68468, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/7e644bb115674cf391f0047d260c5713, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/dc7448663a4741769174325ea0b242a0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/fe2cffd7d3764430bdc1cad7518b6526] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=48.1 K 2024-12-04T15:23:45,625 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff21cac52b964890abd35f1ea3c68468, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733325821009 2024-12-04T15:23:45,628 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e644bb115674cf391f0047d260c5713, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1733325821416 2024-12-04T15:23:45,632 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc7448663a4741769174325ea0b242a0, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733325822679 2024-12-04T15:23:45,633 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe2cffd7d3764430bdc1cad7518b6526, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1733325823368 2024-12-04T15:23:45,706 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#B#compaction#510 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:45,707 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/b9703be162984068a0270fb2be851720 is 50, key is test_row_0/B:col10/1733325823369/Put/seqid=0 2024-12-04T15:23:45,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742421_1597 (size=12915) 2024-12-04T15:23:45,784 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/b9703be162984068a0270fb2be851720 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/b9703be162984068a0270fb2be851720 2024-12-04T15:23:45,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:45,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:45,789 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/B of 1c46d02b12b7c26e3e20a64bcdd3bec5 into b9703be162984068a0270fb2be851720(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:45,789 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:45,789 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/B, priority=12, startTime=1733325825401; duration=0sec 2024-12-04T15:23:45,789 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:45,789 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:B 2024-12-04T15:23:45,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325885848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:45,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325885861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:45,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325885862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:45,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325885865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:45,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-04T15:23:45,942 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/413f22f22a5a41ca86fb048bc6a11117 2024-12-04T15:23:45,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:45,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325885966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:45,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:45,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325885971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:45,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:45,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325885972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:45,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-04T15:23:45,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/df8c437c786b48819e669eb5758084f7 is 50, key is test_row_0/B:col10/1733325824608/Put/seqid=0
2024-12-04T15:23:45,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325885983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856
2024-12-04T15:23:45,988 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/5596662f92fc4876b52195b5c2e265f3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/5596662f92fc4876b52195b5c2e265f3
2024-12-04T15:23:45,993 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/C of 1c46d02b12b7c26e3e20a64bcdd3bec5 into 5596662f92fc4876b52195b5c2e265f3(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-04T15:23:45,993 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5:
2024-12-04T15:23:45,994 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/C, priority=12, startTime=1733325825402; duration=0sec
2024-12-04T15:23:45,994 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-04T15:23:45,994 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:C
2024-12-04T15:23:46,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742422_1598 (size=12301)
2024-12-04T15:23:46,024 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/df8c437c786b48819e669eb5758084f7
2024-12-04T15:23:46,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/42455b991e4644fc830b8856b8841c86 is 50, key is test_row_0/C:col10/1733325824608/Put/seqid=0
2024-12-04T15:23:46,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742423_1599 (size=12301)
2024-12-04T15:23:46,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:46,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325886196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:46,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:46,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325886196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:46,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:46,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325886200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:46,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:46,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325886200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:46,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:46,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325886512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:46,519 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/42455b991e4644fc830b8856b8841c86 2024-12-04T15:23:46,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325886516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:46,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325886517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:46,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:46,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325886519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:46,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/413f22f22a5a41ca86fb048bc6a11117 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/413f22f22a5a41ca86fb048bc6a11117 2024-12-04T15:23:46,593 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/413f22f22a5a41ca86fb048bc6a11117, entries=150, sequenceid=285, filesize=12.0 K 2024-12-04T15:23:46,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/df8c437c786b48819e669eb5758084f7 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/df8c437c786b48819e669eb5758084f7 2024-12-04T15:23:46,629 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/df8c437c786b48819e669eb5758084f7, entries=150, sequenceid=285, filesize=12.0 K 2024-12-04T15:23:46,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/42455b991e4644fc830b8856b8841c86 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/42455b991e4644fc830b8856b8841c86
2024-12-04T15:23:46,665 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/42455b991e4644fc830b8856b8841c86, entries=150, sequenceid=285, filesize=12.0 K
2024-12-04T15:23:46,666 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 1225ms, sequenceid=285, compaction requested=false
2024-12-04T15:23:46,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5:
2024-12-04T15:23:46,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.
2024-12-04T15:23:46,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135
2024-12-04T15:23:46,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=135
2024-12-04T15:23:46,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134
2024-12-04T15:23:46,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8970 sec
2024-12-04T15:23:46,677 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.9170 sec
2024-12-04T15:23:46,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134
2024-12-04T15:23:46,885 INFO [Thread-2375 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed
2024-12-04T15:23:46,889 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-04T15:23:46,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees
2024-12-04T15:23:46,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136
2024-12-04T15:23:46,904 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=136,
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-04T15:23:46,905 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-04T15:23:46,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-04T15:23:47,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136
2024-12-04T15:23:47,030 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB
2024-12-04T15:23:47,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A
2024-12-04T15:23:47,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-04T15:23:47,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B
2024-12-04T15:23:47,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-04T15:23:47,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C
2024-12-04T15:23:47,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-04T15:23:47,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5
2024-12-04T15:23:47,048 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/02f16b51f1cd4403804f27973b15a041 is 50, key is test_row_0/A:col10/1733325827028/Put/seqid=0
2024-12-04T15:23:47,058 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856
2024-12-04T15:23:47,059 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137
2024-12-04T15:23:47,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.
2024-12-04T15:23:47,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.
as already flushing 2024-12-04T15:23:47,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:47,059 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:47,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:47,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:47,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742424_1600 (size=14741) 2024-12-04T15:23:47,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:47,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325887088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:47,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325887096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:47,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325887100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:47,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325887106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-04T15:23:47,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:47,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325887208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:47,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325887209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:47,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325887216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:47,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325887220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,230 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-04T15:23:47,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:47,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:47,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:47,231 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:47,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:47,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:47,384 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,384 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-04T15:23:47,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:47,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:47,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:47,385 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:47,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:47,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:47,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:47,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325887420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:47,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325887425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:47,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325887432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:47,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325887433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,483 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/02f16b51f1cd4403804f27973b15a041 2024-12-04T15:23:47,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-04T15:23:47,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/3b815580182149e1b5b7ed452ae2214f is 50, key is test_row_0/B:col10/1733325827028/Put/seqid=0 2024-12-04T15:23:47,545 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-04T15:23:47,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:47,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:47,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:47,551 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:47,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:47,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:47,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742425_1601 (size=12301) 2024-12-04T15:23:47,557 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/3b815580182149e1b5b7ed452ae2214f 2024-12-04T15:23:47,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/fd67b93bc2c0467dbdc75eac606c7422 is 50, key is test_row_0/C:col10/1733325827028/Put/seqid=0 2024-12-04T15:23:47,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742426_1602 (size=12301) 2024-12-04T15:23:47,707 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,708 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-04T15:23:47,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:47,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
as already flushing 2024-12-04T15:23:47,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:47,710 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:47,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:47,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:47,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:47,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325887741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:47,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325887744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:47,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325887760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:47,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325887761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,864 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:47,868 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-04T15:23:47,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:47,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:47,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:47,872 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:47,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:47,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:48,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-04T15:23:48,029 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:48,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-04T15:23:48,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:48,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:48,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:48,033 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:48,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:23:48,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:23:48,044 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/fd67b93bc2c0467dbdc75eac606c7422 2024-12-04T15:23:48,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/02f16b51f1cd4403804f27973b15a041 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/02f16b51f1cd4403804f27973b15a041 2024-12-04T15:23:48,083 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/02f16b51f1cd4403804f27973b15a041, entries=200, sequenceid=305, filesize=14.4 K 2024-12-04T15:23:48,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/3b815580182149e1b5b7ed452ae2214f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/3b815580182149e1b5b7ed452ae2214f 2024-12-04T15:23:48,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/3b815580182149e1b5b7ed452ae2214f, entries=150, sequenceid=305, filesize=12.0 K 2024-12-04T15:23:48,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/fd67b93bc2c0467dbdc75eac606c7422 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/fd67b93bc2c0467dbdc75eac606c7422 2024-12-04T15:23:48,139 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/fd67b93bc2c0467dbdc75eac606c7422, entries=150, sequenceid=305, filesize=12.0 K 2024-12-04T15:23:48,141 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 1112ms, sequenceid=305, compaction requested=true 2024-12-04T15:23:48,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:48,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:23:48,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:48,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:23:48,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:48,141 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:48,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:23:48,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:23:48,144 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:48,151 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39957 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:48,152 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/A is initiating minor compaction (all files) 2024-12-04T15:23:48,152 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/A in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:48,152 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/d6a90098d1f84132b41b690bdb0fda3d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/413f22f22a5a41ca86fb048bc6a11117, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/02f16b51f1cd4403804f27973b15a041] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=39.0 K 2024-12-04T15:23:48,156 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6a90098d1f84132b41b690bdb0fda3d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1733325823368 2024-12-04T15:23:48,157 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 413f22f22a5a41ca86fb048bc6a11117, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733325824608 2024-12-04T15:23:48,157 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02f16b51f1cd4403804f27973b15a041, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1733325825836 2024-12-04T15:23:48,160 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:48,160 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/B is initiating minor compaction (all files) 2024-12-04T15:23:48,160 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/B in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:48,160 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/b9703be162984068a0270fb2be851720, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/df8c437c786b48819e669eb5758084f7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/3b815580182149e1b5b7ed452ae2214f] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=36.6 K 2024-12-04T15:23:48,170 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting b9703be162984068a0270fb2be851720, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1733325823368 2024-12-04T15:23:48,176 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting df8c437c786b48819e669eb5758084f7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733325824608 2024-12-04T15:23:48,187 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b815580182149e1b5b7ed452ae2214f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1733325825860 2024-12-04T15:23:48,200 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:48,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-04T15:23:48,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:48,204 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-04T15:23:48,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:48,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:48,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:48,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:48,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:48,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:48,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/0acbe5e414f641b4aa6a4b15ff499366 is 50, key is test_row_0/A:col10/1733325827083/Put/seqid=0 2024-12-04T15:23:48,234 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#A#compaction#517 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:48,234 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/53a8eaa27acf416fac0309c4903c5245 is 50, key is test_row_0/A:col10/1733325827028/Put/seqid=0 2024-12-04T15:23:48,290 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#B#compaction#518 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:48,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742427_1603 (size=12301) 2024-12-04T15:23:48,290 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/1bf61aadca82468ca88bd3dfef4a593c is 50, key is test_row_0/B:col10/1733325827028/Put/seqid=0 2024-12-04T15:23:48,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:48,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:48,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742428_1604 (size=13017) 2024-12-04T15:23:48,351 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/53a8eaa27acf416fac0309c4903c5245 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/53a8eaa27acf416fac0309c4903c5245 2024-12-04T15:23:48,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742429_1605 (size=13017) 2024-12-04T15:23:48,361 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/A of 1c46d02b12b7c26e3e20a64bcdd3bec5 into 53a8eaa27acf416fac0309c4903c5245(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:48,361 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:48,361 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/A, priority=13, startTime=1733325828141; duration=0sec 2024-12-04T15:23:48,361 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:48,361 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:A 2024-12-04T15:23:48,361 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:48,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:48,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325888349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:48,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:48,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325888352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:48,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:48,363 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:48,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325888353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:48,363 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/C is initiating minor compaction (all files) 2024-12-04T15:23:48,363 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/C in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:48,363 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/5596662f92fc4876b52195b5c2e265f3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/42455b991e4644fc830b8856b8841c86, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/fd67b93bc2c0467dbdc75eac606c7422] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=36.6 K 2024-12-04T15:23:48,364 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5596662f92fc4876b52195b5c2e265f3, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1733325823368 2024-12-04T15:23:48,364 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42455b991e4644fc830b8856b8841c86, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733325824608 2024-12-04T15:23:48,365 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd67b93bc2c0467dbdc75eac606c7422, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1733325825860 2024-12-04T15:23:48,365 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/1bf61aadca82468ca88bd3dfef4a593c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/1bf61aadca82468ca88bd3dfef4a593c 2024-12-04T15:23:48,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:48,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325888362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:48,382 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/B of 1c46d02b12b7c26e3e20a64bcdd3bec5 into 1bf61aadca82468ca88bd3dfef4a593c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:48,382 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:48,382 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/B, priority=13, startTime=1733325828141; duration=0sec 2024-12-04T15:23:48,383 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:48,383 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:B 2024-12-04T15:23:48,383 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#C#compaction#519 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:48,383 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/707e3667e0c34896bca49281ca7f4890 is 50, key is test_row_0/C:col10/1733325827028/Put/seqid=0 2024-12-04T15:23:48,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742430_1606 (size=13017) 2024-12-04T15:23:48,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:48,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325888468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:48,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:48,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325888468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:48,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:48,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325888469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:48,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:48,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325888473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:48,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:48,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325888675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:48,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:48,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325888677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:48,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:48,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325888677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:48,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:48,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325888679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:48,700 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/0acbe5e414f641b4aa6a4b15ff499366 2024-12-04T15:23:48,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/d912034856764d518d17978605fb916c is 50, key is test_row_0/B:col10/1733325827083/Put/seqid=0 2024-12-04T15:23:48,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742431_1607 (size=12301) 2024-12-04T15:23:48,899 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/707e3667e0c34896bca49281ca7f4890 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/707e3667e0c34896bca49281ca7f4890 2024-12-04T15:23:48,964 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/C of 1c46d02b12b7c26e3e20a64bcdd3bec5 into 707e3667e0c34896bca49281ca7f4890(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:48,964 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:48,964 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/C, priority=13, startTime=1733325828141; duration=0sec 2024-12-04T15:23:48,964 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:48,964 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:C 2024-12-04T15:23:48,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:48,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325888990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:48,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:48,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325888990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:48,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:48,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325888990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:48,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:48,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325888990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:49,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-04T15:23:49,217 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/d912034856764d518d17978605fb916c 2024-12-04T15:23:49,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/0d8934ee0ec14aaea6aad6fbb8a256b3 is 50, key is test_row_0/C:col10/1733325827083/Put/seqid=0 2024-12-04T15:23:49,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:49,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57502 deadline: 1733325889272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:49,293 DEBUG [Thread-2373 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8264 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., hostname=645c2dbfef2e,42169,1733325683856, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:23:49,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742432_1608 (size=12301) 2024-12-04T15:23:49,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:49,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325889503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:49,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:49,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325889504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:49,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:49,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325889508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:49,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:49,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325889508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:49,700 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/0d8934ee0ec14aaea6aad6fbb8a256b3 2024-12-04T15:23:49,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/0acbe5e414f641b4aa6a4b15ff499366 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/0acbe5e414f641b4aa6a4b15ff499366 2024-12-04T15:23:49,712 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/0acbe5e414f641b4aa6a4b15ff499366, entries=150, sequenceid=324, filesize=12.0 K 2024-12-04T15:23:49,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/d912034856764d518d17978605fb916c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/d912034856764d518d17978605fb916c 2024-12-04T15:23:49,717 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/d912034856764d518d17978605fb916c, entries=150, sequenceid=324, filesize=12.0 K 2024-12-04T15:23:49,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/0d8934ee0ec14aaea6aad6fbb8a256b3 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/0d8934ee0ec14aaea6aad6fbb8a256b3 2024-12-04T15:23:49,725 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/0d8934ee0ec14aaea6aad6fbb8a256b3, entries=150, sequenceid=324, filesize=12.0 K 2024-12-04T15:23:49,726 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 1522ms, sequenceid=324, compaction requested=false 2024-12-04T15:23:49,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:49,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
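
Note on the repeated RegionTooBusyException entries above: they are the region server's write backpressure path. HRegion.checkResources rejects puts once the region's memstore exceeds its blocking limit (512.0 K in this run), and the client-side RpcRetryingCallerImpl keeps retrying (tries=7, retries=16 in the trace earlier) until the flush that follows drains the memstore. A minimal sketch of such a writer against the standard HBase 2.x client API is shown below; the retry and pause values are illustrative only, not the ones used by this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingPutExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Client-side retry knobs; RegionTooBusyException is retried internally
            // by RpcRetryingCallerImpl until these limits are exhausted.
            conf.setInt("hbase.client.retries.number", 16);
            conf.setLong("hbase.client.pause", 100);            // base backoff in ms
            conf.setLong("hbase.client.operation.timeout", 120_000);

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_1"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                // Blocks and retries while the region sheds memstore pressure.
                table.put(put);
            }
        }
    }
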
2024-12-04T15:23:49,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-12-04T15:23:49,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-12-04T15:23:49,746 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-04T15:23:49,746 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8350 sec 2024-12-04T15:23:49,748 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 2.8580 sec 2024-12-04T15:23:50,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:50,532 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-04T15:23:50,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:50,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:50,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:50,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:50,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:50,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:50,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/afb584f879f144268f63c236c5001664 is 50, key is test_row_0/A:col10/1733325830523/Put/seqid=0 2024-12-04T15:23:50,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:50,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325890584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:50,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:50,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325890584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:50,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:50,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325890584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:50,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:50,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325890600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:50,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742433_1609 (size=12301) 2024-12-04T15:23:50,616 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/afb584f879f144268f63c236c5001664 2024-12-04T15:23:50,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/d4af9878af03477682afaad6d6c4cfe5 is 50, key is test_row_0/B:col10/1733325830523/Put/seqid=0 2024-12-04T15:23:50,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:50,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325890704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:50,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:50,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325890705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:50,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:50,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325890705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:50,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:50,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325890710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:50,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742434_1610 (size=12301) 2024-12-04T15:23:50,740 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/d4af9878af03477682afaad6d6c4cfe5 2024-12-04T15:23:50,815 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/d2e52fcf27a54e6eb64b67c6f8b54de6 is 50, key is test_row_0/C:col10/1733325830523/Put/seqid=0 2024-12-04T15:23:50,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742435_1611 (size=12301) 2024-12-04T15:23:50,877 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/d2e52fcf27a54e6eb64b67c6f8b54de6 2024-12-04T15:23:50,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/afb584f879f144268f63c236c5001664 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/afb584f879f144268f63c236c5001664 2024-12-04T15:23:50,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:50,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325890912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:50,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:50,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325890912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:50,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:50,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325890919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:50,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:50,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325890925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:50,941 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/afb584f879f144268f63c236c5001664, entries=150, sequenceid=346, filesize=12.0 K 2024-12-04T15:23:50,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/d4af9878af03477682afaad6d6c4cfe5 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/d4af9878af03477682afaad6d6c4cfe5 2024-12-04T15:23:50,981 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/d4af9878af03477682afaad6d6c4cfe5, entries=150, sequenceid=346, filesize=12.0 K 2024-12-04T15:23:50,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/d2e52fcf27a54e6eb64b67c6f8b54de6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/d2e52fcf27a54e6eb64b67c6f8b54de6 2024-12-04T15:23:51,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-04T15:23:51,021 INFO [Thread-2375 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-04T15:23:51,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/d2e52fcf27a54e6eb64b67c6f8b54de6, entries=150, sequenceid=346, filesize=12.0 K 2024-12-04T15:23:51,029 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:23:51,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-12-04T15:23:51,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-04T15:23:51,032 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 500ms, sequenceid=346, compaction requested=true 2024-12-04T15:23:51,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:51,033 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:51,037 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:23:51,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:23:51,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:51,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:23:51,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:51,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:23:51,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:23:51,040 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:51,040 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:51,040 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/A is initiating minor compaction (all files) 2024-12-04T15:23:51,040 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/A in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:51,040 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:23:51,040 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/53a8eaa27acf416fac0309c4903c5245, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/0acbe5e414f641b4aa6a4b15ff499366, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/afb584f879f144268f63c236c5001664] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=36.7 K 2024-12-04T15:23:51,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:23:51,044 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 53a8eaa27acf416fac0309c4903c5245, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1733325825860 2024-12-04T15:23:51,052 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:51,052 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/B is initiating minor compaction (all files) 2024-12-04T15:23:51,052 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/B in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
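
Alongside the compaction selection above (ExploringCompactionPolicy picking 3 store files per family), the master is servicing explicit flush requests from the test client (Operation: FLUSH, FlushTableProcedure pid=136 and pid=138). A minimal sketch of issuing such a flush through the standard Admin API follows; the connection setup is illustrative, and the procedure names in the comment are simply what this build's log shows for that call.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Synchronous flush of all regions of the table; in this log it appears as a
                // FlushTableProcedure with one FlushRegionProcedure subprocedure per region.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
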
2024-12-04T15:23:51,052 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/1bf61aadca82468ca88bd3dfef4a593c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/d912034856764d518d17978605fb916c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/d4af9878af03477682afaad6d6c4cfe5] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=36.7 K 2024-12-04T15:23:51,052 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0acbe5e414f641b4aa6a4b15ff499366, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733325827083 2024-12-04T15:23:51,056 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bf61aadca82468ca88bd3dfef4a593c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1733325825860 2024-12-04T15:23:51,056 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting afb584f879f144268f63c236c5001664, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1733325828326 2024-12-04T15:23:51,064 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting d912034856764d518d17978605fb916c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733325827083 2024-12-04T15:23:51,071 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4af9878af03477682afaad6d6c4cfe5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1733325828326 2024-12-04T15:23:51,111 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#A#compaction#525 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:51,111 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/b9e5af8114234ea9aaad636e0acc1e7d is 50, key is test_row_0/A:col10/1733325830523/Put/seqid=0 2024-12-04T15:23:51,125 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#B#compaction#526 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:51,126 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/a4c331cf792341a5b64621aa0387aa7d is 50, key is test_row_0/B:col10/1733325830523/Put/seqid=0 2024-12-04T15:23:51,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-04T15:23:51,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742437_1613 (size=13119) 2024-12-04T15:23:51,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742436_1612 (size=13119) 2024-12-04T15:23:51,199 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:51,200 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-04T15:23:51,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:51,202 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-04T15:23:51,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:51,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:51,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:51,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:51,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:51,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:51,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/d06adfdc9c6f4bed8c7595bc0bd3ecd5 is 50, key is 
test_row_0/A:col10/1733325830596/Put/seqid=0 2024-12-04T15:23:51,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:51,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. as already flushing 2024-12-04T15:23:51,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742438_1614 (size=12301) 2024-12-04T15:23:51,262 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/d06adfdc9c6f4bed8c7595bc0bd3ecd5 2024-12-04T15:23:51,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/e1f3f172d8f0499f891b9bff29b18904 is 50, key is test_row_0/B:col10/1733325830596/Put/seqid=0 2024-12-04T15:23:51,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:51,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325891299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:51,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:51,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325891300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:51,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:51,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325891301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:51,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:51,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325891306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:51,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-04T15:23:51,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742439_1615 (size=12301) 2024-12-04T15:23:51,382 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/e1f3f172d8f0499f891b9bff29b18904 2024-12-04T15:23:51,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/4696c2ec2e4d443ab1ef66b7a7d2890f is 50, key is test_row_0/C:col10/1733325830596/Put/seqid=0 2024-12-04T15:23:51,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:51,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325891424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:51,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:51,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325891424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:51,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:51,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325891431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:51,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:51,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325891439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:51,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742440_1616 (size=12301) 2024-12-04T15:23:51,476 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/4696c2ec2e4d443ab1ef66b7a7d2890f 2024-12-04T15:23:51,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/d06adfdc9c6f4bed8c7595bc0bd3ecd5 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/d06adfdc9c6f4bed8c7595bc0bd3ecd5 2024-12-04T15:23:51,563 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/d06adfdc9c6f4bed8c7595bc0bd3ecd5, entries=150, sequenceid=363, filesize=12.0 K 2024-12-04T15:23:51,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/e1f3f172d8f0499f891b9bff29b18904 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/e1f3f172d8f0499f891b9bff29b18904 2024-12-04T15:23:51,596 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/a4c331cf792341a5b64621aa0387aa7d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/a4c331cf792341a5b64621aa0387aa7d 
2024-12-04T15:23:51,609 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/e1f3f172d8f0499f891b9bff29b18904, entries=150, sequenceid=363, filesize=12.0 K 2024-12-04T15:23:51,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/4696c2ec2e4d443ab1ef66b7a7d2890f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/4696c2ec2e4d443ab1ef66b7a7d2890f 2024-12-04T15:23:51,628 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/b9e5af8114234ea9aaad636e0acc1e7d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/b9e5af8114234ea9aaad636e0acc1e7d 2024-12-04T15:23:51,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-04T15:23:51,638 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/B of 1c46d02b12b7c26e3e20a64bcdd3bec5 into a4c331cf792341a5b64621aa0387aa7d(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:51,638 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:51,638 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/B, priority=13, startTime=1733325831038; duration=0sec 2024-12-04T15:23:51,638 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:51,638 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:B 2024-12-04T15:23:51,638 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:51,640 DEBUG [Thread-2376 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x101a467e to 127.0.0.1:55739 2024-12-04T15:23:51,641 DEBUG [Thread-2376 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:51,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:51,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733325891640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:51,644 DEBUG [Thread-2380 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x738d3eb1 to 127.0.0.1:55739 2024-12-04T15:23:51,644 DEBUG [Thread-2380 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:51,645 DEBUG [Thread-2382 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a4579dd to 127.0.0.1:55739 2024-12-04T15:23:51,645 DEBUG [Thread-2382 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:51,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:51,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57426 deadline: 1733325891640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:51,648 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:51,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57428 deadline: 1733325891642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:51,649 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:51,649 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/C is initiating minor compaction (all files) 2024-12-04T15:23:51,649 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/C in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:51,649 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/707e3667e0c34896bca49281ca7f4890, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/0d8934ee0ec14aaea6aad6fbb8a256b3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/d2e52fcf27a54e6eb64b67c6f8b54de6] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=36.7 K 2024-12-04T15:23:51,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:23:51,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57476 deadline: 1733325891651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:51,653 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 707e3667e0c34896bca49281ca7f4890, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1733325825860 2024-12-04T15:23:51,653 DEBUG [Thread-2384 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70776486 to 127.0.0.1:55739 2024-12-04T15:23:51,653 DEBUG [Thread-2384 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:51,656 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/4696c2ec2e4d443ab1ef66b7a7d2890f, entries=150, sequenceid=363, filesize=12.0 K 2024-12-04T15:23:51,657 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d8934ee0ec14aaea6aad6fbb8a256b3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733325827083 2024-12-04T15:23:51,658 DEBUG [Thread-2378 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x717d73ef to 127.0.0.1:55739 2024-12-04T15:23:51,658 DEBUG [Thread-2378 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:51,658 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2e52fcf27a54e6eb64b67c6f8b54de6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1733325828326 2024-12-04T15:23:51,658 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 456ms, sequenceid=363, compaction requested=false 2024-12-04T15:23:51,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:51,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:51,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-04T15:23:51,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-12-04T15:23:51,659 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/A of 1c46d02b12b7c26e3e20a64bcdd3bec5 into b9e5af8114234ea9aaad636e0acc1e7d(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:51,659 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:51,659 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/A, priority=13, startTime=1733325831032; duration=0sec 2024-12-04T15:23:51,659 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:51,659 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:A 2024-12-04T15:23:51,665 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#C#compaction#530 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:51,666 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/330ccb1f88b848cdbe2c2061271077e9 is 50, key is test_row_0/C:col10/1733325830523/Put/seqid=0 2024-12-04T15:23:51,680 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-04T15:23:51,680 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 624 msec 2024-12-04T15:23:51,682 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 651 msec 2024-12-04T15:23:51,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742441_1617 (size=13119) 2024-12-04T15:23:51,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:51,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-04T15:23:51,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:51,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:51,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:51,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:51,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:51,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:51,950 DEBUG [Thread-2369 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72f6c410 to 127.0.0.1:55739 2024-12-04T15:23:51,950 DEBUG [Thread-2369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:51,952 DEBUG [Thread-2365 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22788836 to 127.0.0.1:55739 2024-12-04T15:23:51,952 DEBUG [Thread-2365 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:51,957 DEBUG [Thread-2367 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x029b4c46 to 127.0.0.1:55739 2024-12-04T15:23:51,957 DEBUG [Thread-2367 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:51,957 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/b188cb37840144b08ed64d949d59b1bf is 50, key is test_row_0/A:col10/1733325831947/Put/seqid=0 2024-12-04T15:23:51,960 DEBUG [Thread-2371 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x5cd1f95e to 127.0.0.1:55739 2024-12-04T15:23:51,960 DEBUG [Thread-2371 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:51,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742442_1618 (size=12301) 2024-12-04T15:23:51,970 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/b188cb37840144b08ed64d949d59b1bf 2024-12-04T15:23:51,987 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/9bda4189ae4c4ce6a04167eb31bbd388 is 50, key is test_row_0/B:col10/1733325831947/Put/seqid=0 2024-12-04T15:23:51,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742443_1619 (size=12301) 2024-12-04T15:23:52,105 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/330ccb1f88b848cdbe2c2061271077e9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/330ccb1f88b848cdbe2c2061271077e9 2024-12-04T15:23:52,109 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/C of 1c46d02b12b7c26e3e20a64bcdd3bec5 into 330ccb1f88b848cdbe2c2061271077e9(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:52,109 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:52,109 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/C, priority=13, startTime=1733325831038; duration=0sec 2024-12-04T15:23:52,109 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:52,109 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:C 2024-12-04T15:23:52,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-04T15:23:52,134 INFO [Thread-2375 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-12-04T15:23:52,214 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-04T15:23:52,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/9bda4189ae4c4ce6a04167eb31bbd388 2024-12-04T15:23:52,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/5fb62c3ef7c6441497f42dcd40418d42 is 50, key is test_row_0/C:col10/1733325831947/Put/seqid=0 2024-12-04T15:23:52,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742444_1620 (size=12301) 2024-12-04T15:23:52,425 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/5fb62c3ef7c6441497f42dcd40418d42 2024-12-04T15:23:52,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/b188cb37840144b08ed64d949d59b1bf as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/b188cb37840144b08ed64d949d59b1bf 2024-12-04T15:23:52,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/b188cb37840144b08ed64d949d59b1bf, entries=150, sequenceid=385, filesize=12.0 K 2024-12-04T15:23:52,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/9bda4189ae4c4ce6a04167eb31bbd388 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/9bda4189ae4c4ce6a04167eb31bbd388 2024-12-04T15:23:52,444 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/9bda4189ae4c4ce6a04167eb31bbd388, entries=150, sequenceid=385, filesize=12.0 K 2024-12-04T15:23:52,445 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/5fb62c3ef7c6441497f42dcd40418d42 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/5fb62c3ef7c6441497f42dcd40418d42 2024-12-04T15:23:52,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/5fb62c3ef7c6441497f42dcd40418d42, entries=150, sequenceid=385, filesize=12.0 K 2024-12-04T15:23:52,452 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=20.13 KB/20610 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 503ms, sequenceid=385, compaction requested=true 2024-12-04T15:23:52,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:52,452 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:52,453 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:52,453 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/A is initiating minor compaction (all files) 2024-12-04T15:23:52,453 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/A in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:52,453 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/b9e5af8114234ea9aaad636e0acc1e7d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/d06adfdc9c6f4bed8c7595bc0bd3ecd5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/b188cb37840144b08ed64d949d59b1bf] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=36.8 K 2024-12-04T15:23:52,454 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9e5af8114234ea9aaad636e0acc1e7d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1733325828326 2024-12-04T15:23:52,454 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting d06adfdc9c6f4bed8c7595bc0bd3ecd5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1733325830578 2024-12-04T15:23:52,454 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b188cb37840144b08ed64d949d59b1bf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1733325831278 2024-12-04T15:23:52,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:23:52,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:52,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:23:52,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:52,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c46d02b12b7c26e3e20a64bcdd3bec5:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:23:52,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:23:52,460 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:52,471 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#A#compaction#534 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:52,471 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/f7048a0fd8df48edb0ac5ed04b1271e8 is 50, key is test_row_0/A:col10/1733325831947/Put/seqid=0 2024-12-04T15:23:52,472 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:52,472 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/B is initiating minor compaction (all files) 2024-12-04T15:23:52,472 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/B in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:52,472 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/a4c331cf792341a5b64621aa0387aa7d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/e1f3f172d8f0499f891b9bff29b18904, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/9bda4189ae4c4ce6a04167eb31bbd388] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=36.8 K 2024-12-04T15:23:52,476 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting a4c331cf792341a5b64621aa0387aa7d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1733325828326 2024-12-04T15:23:52,480 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting e1f3f172d8f0499f891b9bff29b18904, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1733325830578 2024-12-04T15:23:52,484 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bda4189ae4c4ce6a04167eb31bbd388, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1733325831278 2024-12-04T15:23:52,499 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#B#compaction#535 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:52,499 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/006705bd8f76475483408e5991ba3cfa is 50, key is test_row_0/B:col10/1733325831947/Put/seqid=0 2024-12-04T15:23:52,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742445_1621 (size=13221) 2024-12-04T15:23:52,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742446_1622 (size=13221) 2024-12-04T15:23:52,510 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/006705bd8f76475483408e5991ba3cfa as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/006705bd8f76475483408e5991ba3cfa 2024-12-04T15:23:52,521 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/B of 1c46d02b12b7c26e3e20a64bcdd3bec5 into 006705bd8f76475483408e5991ba3cfa(size=12.9 K), total size for store is 12.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:52,521 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:52,521 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/B, priority=13, startTime=1733325832457; duration=0sec 2024-12-04T15:23:52,522 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:23:52,522 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:B 2024-12-04T15:23:52,522 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:23:52,522 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:23:52,523 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 1c46d02b12b7c26e3e20a64bcdd3bec5/C is initiating minor compaction (all files) 2024-12-04T15:23:52,523 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1c46d02b12b7c26e3e20a64bcdd3bec5/C in TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
2024-12-04T15:23:52,523 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/330ccb1f88b848cdbe2c2061271077e9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/4696c2ec2e4d443ab1ef66b7a7d2890f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/5fb62c3ef7c6441497f42dcd40418d42] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp, totalSize=36.8 K 2024-12-04T15:23:52,523 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 330ccb1f88b848cdbe2c2061271077e9, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1733325828326 2024-12-04T15:23:52,523 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 4696c2ec2e4d443ab1ef66b7a7d2890f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1733325830578 2024-12-04T15:23:52,524 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 5fb62c3ef7c6441497f42dcd40418d42, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1733325831278 2024-12-04T15:23:52,550 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c46d02b12b7c26e3e20a64bcdd3bec5#C#compaction#536 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:23:52,551 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/ca419c7b0bb4417eba1ad05e239e473e is 50, key is test_row_0/C:col10/1733325831947/Put/seqid=0 2024-12-04T15:23:52,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742447_1623 (size=13221) 2024-12-04T15:23:52,563 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/ca419c7b0bb4417eba1ad05e239e473e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/ca419c7b0bb4417eba1ad05e239e473e 2024-12-04T15:23:52,569 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/C of 1c46d02b12b7c26e3e20a64bcdd3bec5 into ca419c7b0bb4417eba1ad05e239e473e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
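[Annotation] The PressureAwareThroughputController records above report the compaction's average throughput (6.55 and 3.28 MB/second) against a 50.00 MB/second limit, with zero sleeps because the writer stayed under the limit. The sketch below is a hypothetical, much simplified write throttle in the same spirit; it is not HBase's PressureAwareThroughputController, and all names are invented.

```java
// Hypothetical sketch of rate-limited writing: track bytes written and sleep
// just long enough to keep the observed rate under a configured limit.
public class SimpleWriteThrottle {
    private final double limitBytesPerSec;
    private final long startNanos = System.nanoTime();
    private long bytesWritten = 0;
    private int sleeps = 0;

    public SimpleWriteThrottle(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
    }

    /** Call after writing a chunk; sleeps if the average rate would exceed the limit. */
    public void control(long chunkBytes) throws InterruptedException {
        bytesWritten += chunkBytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minSecForBytes = bytesWritten / limitBytesPerSec;
        if (minSecForBytes > elapsedSec) {
            sleeps++;
            Thread.sleep((long) ((minSecForBytes - elapsedSec) * 1000));
        }
    }

    public String summary() {
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        return String.format("average throughput is %.2f MB/second, slept %d time(s)",
                bytesWritten / elapsedSec / (1024 * 1024), sleeps);
    }

    public static void main(String[] args) throws InterruptedException {
        SimpleWriteThrottle throttle = new SimpleWriteThrottle(50 * 1024 * 1024); // 50 MB/s
        for (int i = 0; i < 10; i++) {
            throttle.control(1024 * 1024); // pretend we wrote a 1 MB chunk
        }
        System.out.println(throttle.summary());
    }
}
```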
2024-12-04T15:23:52,569 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:52,569 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/C, priority=13, startTime=1733325832458; duration=0sec 2024-12-04T15:23:52,569 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:52,569 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:C 2024-12-04T15:23:52,903 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/f7048a0fd8df48edb0ac5ed04b1271e8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/f7048a0fd8df48edb0ac5ed04b1271e8 2024-12-04T15:23:52,907 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1c46d02b12b7c26e3e20a64bcdd3bec5/A of 1c46d02b12b7c26e3e20a64bcdd3bec5 into f7048a0fd8df48edb0ac5ed04b1271e8(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:23:52,907 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:23:52,907 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5., storeName=1c46d02b12b7c26e3e20a64bcdd3bec5/A, priority=13, startTime=1733325832452; duration=0sec 2024-12-04T15:23:52,908 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:23:52,908 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c46d02b12b7c26e3e20a64bcdd3bec5:A 2024-12-04T15:23:59,345 DEBUG [Thread-2373 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x684e78a0 to 127.0.0.1:55739 2024-12-04T15:23:59,345 DEBUG [Thread-2373 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 73 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 34 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1095 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3284 rows 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1090 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3270 rows 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1093 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3279 rows 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1082 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3245 rows 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1091 2024-12-04T15:23:59,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3273 rows 2024-12-04T15:23:59,346 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-04T15:23:59,346 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7815a655 to 127.0.0.1:55739 2024-12-04T15:23:59,346 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:23:59,353 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-04T15:23:59,355 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-04T15:23:59,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-04T15:23:59,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-04T15:23:59,363 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325839362"}]},"ts":"1733325839362"} 2024-12-04T15:23:59,364 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-04T15:23:59,367 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-04T15:23:59,368 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-04T15:23:59,370 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1c46d02b12b7c26e3e20a64bcdd3bec5, UNASSIGN}] 2024-12-04T15:23:59,371 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1c46d02b12b7c26e3e20a64bcdd3bec5, UNASSIGN 2024-12-04T15:23:59,372 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=1c46d02b12b7c26e3e20a64bcdd3bec5, regionState=CLOSING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:59,373 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-04T15:23:59,373 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; CloseRegionProcedure 1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:23:59,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-04T15:23:59,525 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:23:59,525 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(124): Close 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:23:59,525 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-04T15:23:59,525 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1681): Closing 1c46d02b12b7c26e3e20a64bcdd3bec5, disabling compactions & flushes 2024-12-04T15:23:59,525 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:59,525 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:23:59,526 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. after waiting 0 ms 2024-12-04T15:23:59,526 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 
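[Annotation] The repeated "Checking to see if procedure is done pid=140" records in this part of the log are the client polling the master while the DisableTableProcedure (pid=140) and its child procedures run. The snippet below is only a conceptual sketch of such a poll-until-done loop with a growing pause, roughly matching the 200-500 ms gaps between the checks; the method, the isDone callback, and the backoff constants are invented and do not correspond to the HBase client API.

```java
import java.util.function.LongPredicate;

// Hypothetical sketch of a client-side "is the procedure done yet?" polling loop.
public class ProcedurePoller {

    /** Polls isDone(pid) with a growing pause until it returns true or the deadline passes. */
    public static boolean waitForProcedure(long pid, LongPredicate isDone, long timeoutMillis)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        long pauseMillis = 100; // start small, grow up to a cap
        while (System.currentTimeMillis() < deadline) {
            if (isDone.test(pid)) {
                return true;
            }
            Thread.sleep(pauseMillis);
            pauseMillis = Math.min(pauseMillis * 2, 1_000);
        }
        return false;
    }

    public static void main(String[] args) throws InterruptedException {
        long finishAt = System.currentTimeMillis() + 1_200; // pretend the procedure takes ~1.2 s
        boolean done = waitForProcedure(140, pid -> System.currentTimeMillis() >= finishAt, 10_000);
        System.out.println("pid=140 done=" + done);
    }
}
```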
2024-12-04T15:23:59,526 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(2837): Flushing 1c46d02b12b7c26e3e20a64bcdd3bec5 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-04T15:23:59,526 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=A 2024-12-04T15:23:59,526 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:59,526 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=B 2024-12-04T15:23:59,526 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:59,526 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1c46d02b12b7c26e3e20a64bcdd3bec5, store=C 2024-12-04T15:23:59,526 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:23:59,530 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/38d96624306d46fa880b7d23d3edbcc7 is 50, key is test_row_0/A:col10/1733325831951/Put/seqid=0 2024-12-04T15:23:59,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742448_1624 (size=9857) 2024-12-04T15:23:59,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-04T15:23:59,937 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/38d96624306d46fa880b7d23d3edbcc7 2024-12-04T15:23:59,944 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/854521c185d44fdb9c057daefc80271d is 50, key is test_row_0/B:col10/1733325831951/Put/seqid=0 2024-12-04T15:23:59,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742449_1625 (size=9857) 2024-12-04T15:23:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-04T15:24:00,352 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 
{event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/854521c185d44fdb9c057daefc80271d 2024-12-04T15:24:00,363 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/fc9f02eebdef4eeaa72fe121997c5959 is 50, key is test_row_0/C:col10/1733325831951/Put/seqid=0 2024-12-04T15:24:00,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742450_1626 (size=9857) 2024-12-04T15:24:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-04T15:24:00,782 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/fc9f02eebdef4eeaa72fe121997c5959 2024-12-04T15:24:00,786 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/A/38d96624306d46fa880b7d23d3edbcc7 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/38d96624306d46fa880b7d23d3edbcc7 2024-12-04T15:24:00,789 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/38d96624306d46fa880b7d23d3edbcc7, entries=100, sequenceid=396, filesize=9.6 K 2024-12-04T15:24:00,790 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/B/854521c185d44fdb9c057daefc80271d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/854521c185d44fdb9c057daefc80271d 2024-12-04T15:24:00,793 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/854521c185d44fdb9c057daefc80271d, entries=100, sequenceid=396, filesize=9.6 K 2024-12-04T15:24:00,794 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/.tmp/C/fc9f02eebdef4eeaa72fe121997c5959 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/fc9f02eebdef4eeaa72fe121997c5959 2024-12-04T15:24:00,797 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/fc9f02eebdef4eeaa72fe121997c5959, entries=100, sequenceid=396, filesize=9.6 K 2024-12-04T15:24:00,798 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 1c46d02b12b7c26e3e20a64bcdd3bec5 in 1272ms, sequenceid=396, compaction requested=false 2024-12-04T15:24:00,799 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/23a1b0fc8a8a4e13a2cd063f6eca3c34, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/5895d78eaa9848d699f941818a23dab9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2bc5a0d923d44b19a9d6f785c675272e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/3dd2c2789bb34074a7a7710c77641552, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/38886d8c9a094966b8ed7fc3c6932aa8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/d3e93c6b0560492da192fbcbbdc7a51f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/84d6a06e0ba44210bd3ab0d76f3bdd02, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/b8a5624fdfa34e3a9389b3465dea4750, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/0d5fa737059840068d007e4c3c77d7a6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2b33986f65ab4d1d9ce1dbe395e7c557, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/a28b9152f1f04392a7b5fc2aa04623f0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/419c19a5805044429af568f1cec839e3, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/8c050425b22d47d9b42fe684e63d1b91, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/bfe3420fb0e34372b9cd80aa2ba0bbfc, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2f203e60ee5d463bbaff836f70cd8559, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/db8566b6a1a04f7a8364ada033a85895, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/97565dfedb7642cba5547982827bc463, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/d6a90098d1f84132b41b690bdb0fda3d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/dd65a23ceaa54a7db6fd93addae7ca60, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/413f22f22a5a41ca86fb048bc6a11117, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/02f16b51f1cd4403804f27973b15a041, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/53a8eaa27acf416fac0309c4903c5245, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/0acbe5e414f641b4aa6a4b15ff499366, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/b9e5af8114234ea9aaad636e0acc1e7d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/afb584f879f144268f63c236c5001664, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/d06adfdc9c6f4bed8c7595bc0bd3ecd5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/b188cb37840144b08ed64d949d59b1bf] to archive 2024-12-04T15:24:00,800 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
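[Annotation] The flush records above follow a write-then-commit pattern: each store's new file is first written under the region's .tmp directory (e.g. .tmp/A/38d96624306d46fa880b7d23d3edbcc7) and only then "committed" by moving it into the column family directory, so readers never observe a partially written file. The sketch below illustrates that pattern with plain java.nio.file on a local filesystem; the class and method names are invented and it is not HBase's HRegionFileSystem.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Hypothetical sketch: write the new file under regionDir/.tmp/<family>/ first,
// then commit it with a single rename into regionDir/<family>/.
public class TmpThenCommit {

    public static Path flushAndCommit(Path regionDir, String family, String name, byte[] data)
            throws IOException {
        Path tmpFile = regionDir.resolve(".tmp").resolve(family).resolve(name);
        Path finalFile = regionDir.resolve(family).resolve(name);
        Files.createDirectories(tmpFile.getParent());
        Files.createDirectories(finalFile.getParent());
        Files.write(tmpFile, data);                  // flush: readers never see this path
        return Files.move(tmpFile, finalFile,        // commit: one atomic rename into the store
                StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path regionDir = Files.createTempDirectory("region-1c46d02b");
        Path committed = flushAndCommit(regionDir, "A", "38d96624306d46fa880b7d23d3edbcc7",
                "test_row_0/A:col10".getBytes());
        System.out.println("committed " + committed);
    }
}
```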
2024-12-04T15:24:00,802 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/23a1b0fc8a8a4e13a2cd063f6eca3c34 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/23a1b0fc8a8a4e13a2cd063f6eca3c34 2024-12-04T15:24:00,803 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/5895d78eaa9848d699f941818a23dab9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/5895d78eaa9848d699f941818a23dab9 2024-12-04T15:24:00,804 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2bc5a0d923d44b19a9d6f785c675272e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2bc5a0d923d44b19a9d6f785c675272e 2024-12-04T15:24:00,805 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/3dd2c2789bb34074a7a7710c77641552 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/3dd2c2789bb34074a7a7710c77641552 2024-12-04T15:24:00,806 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/38886d8c9a094966b8ed7fc3c6932aa8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/38886d8c9a094966b8ed7fc3c6932aa8 2024-12-04T15:24:00,807 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/d3e93c6b0560492da192fbcbbdc7a51f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/d3e93c6b0560492da192fbcbbdc7a51f 2024-12-04T15:24:00,808 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/84d6a06e0ba44210bd3ab0d76f3bdd02 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/84d6a06e0ba44210bd3ab0d76f3bdd02 2024-12-04T15:24:00,809 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/b8a5624fdfa34e3a9389b3465dea4750 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/b8a5624fdfa34e3a9389b3465dea4750 2024-12-04T15:24:00,810 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/0d5fa737059840068d007e4c3c77d7a6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/0d5fa737059840068d007e4c3c77d7a6 2024-12-04T15:24:00,812 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2b33986f65ab4d1d9ce1dbe395e7c557 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2b33986f65ab4d1d9ce1dbe395e7c557 2024-12-04T15:24:00,813 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/a28b9152f1f04392a7b5fc2aa04623f0 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/a28b9152f1f04392a7b5fc2aa04623f0 2024-12-04T15:24:00,814 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/419c19a5805044429af568f1cec839e3 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/419c19a5805044429af568f1cec839e3 2024-12-04T15:24:00,815 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/8c050425b22d47d9b42fe684e63d1b91 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/8c050425b22d47d9b42fe684e63d1b91 2024-12-04T15:24:00,816 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/bfe3420fb0e34372b9cd80aa2ba0bbfc to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/bfe3420fb0e34372b9cd80aa2ba0bbfc 2024-12-04T15:24:00,818 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2f203e60ee5d463bbaff836f70cd8559 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/2f203e60ee5d463bbaff836f70cd8559 2024-12-04T15:24:00,819 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/db8566b6a1a04f7a8364ada033a85895 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/db8566b6a1a04f7a8364ada033a85895 2024-12-04T15:24:00,820 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/97565dfedb7642cba5547982827bc463 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/97565dfedb7642cba5547982827bc463 2024-12-04T15:24:00,821 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/d6a90098d1f84132b41b690bdb0fda3d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/d6a90098d1f84132b41b690bdb0fda3d 2024-12-04T15:24:00,822 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/dd65a23ceaa54a7db6fd93addae7ca60 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/dd65a23ceaa54a7db6fd93addae7ca60 2024-12-04T15:24:00,824 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/413f22f22a5a41ca86fb048bc6a11117 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/413f22f22a5a41ca86fb048bc6a11117 2024-12-04T15:24:00,825 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/02f16b51f1cd4403804f27973b15a041 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/02f16b51f1cd4403804f27973b15a041 2024-12-04T15:24:00,826 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/53a8eaa27acf416fac0309c4903c5245 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/53a8eaa27acf416fac0309c4903c5245 2024-12-04T15:24:00,827 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/0acbe5e414f641b4aa6a4b15ff499366 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/0acbe5e414f641b4aa6a4b15ff499366 2024-12-04T15:24:00,829 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/b9e5af8114234ea9aaad636e0acc1e7d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/b9e5af8114234ea9aaad636e0acc1e7d 2024-12-04T15:24:00,830 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/afb584f879f144268f63c236c5001664 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/afb584f879f144268f63c236c5001664 2024-12-04T15:24:00,831 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/d06adfdc9c6f4bed8c7595bc0bd3ecd5 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/d06adfdc9c6f4bed8c7595bc0bd3ecd5 2024-12-04T15:24:00,832 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/b188cb37840144b08ed64d949d59b1bf to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/b188cb37840144b08ed64d949d59b1bf 2024-12-04T15:24:00,834 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/424720a6f2bf429793ab330c9d5b5026, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/30dcfbc84bb6464388e74108c6883249, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/bccb3dabeee24cc2bbf544439349d65a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/229fc3f3fd0c4392b09ce68261f48237, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/ae9d2811bc9c4bcea09db6f77aecd3d4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/bf278c7ea3e2419691224a651a682ad0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/8dc32097473e4a20a95aff467059942a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/80bbd91c1148461f97ac98664e5cca47, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/1917a55981f54404b2611056b1c2cfd3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/b2e2e785fe364c3bad54099c0c0e669f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/fcd3617d70944a328a653930a1007b45, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/2727f7e8601843b78877e9dc27064073, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/a1712c60bae0483b824f59031ca5180c, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/ff21cac52b964890abd35f1ea3c68468, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/1259db9c467444b280212fae0c892c05, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/7e644bb115674cf391f0047d260c5713, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/dc7448663a4741769174325ea0b242a0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/b9703be162984068a0270fb2be851720, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/fe2cffd7d3764430bdc1cad7518b6526, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/df8c437c786b48819e669eb5758084f7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/1bf61aadca82468ca88bd3dfef4a593c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/3b815580182149e1b5b7ed452ae2214f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/d912034856764d518d17978605fb916c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/a4c331cf792341a5b64621aa0387aa7d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/d4af9878af03477682afaad6d6c4cfe5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/e1f3f172d8f0499f891b9bff29b18904, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/9bda4189ae4c4ce6a04167eb31bbd388] to archive 2024-12-04T15:24:00,835 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
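[Annotation] Each HFileArchiver record above moves a compacted store file from .../data/default/TestAcidGuarantees/<region>/<family>/<file> to the same relative path re-rooted under .../archive/. The snippet below is only an illustration of that path mapping using java.nio.file.Path; it is not HBase's HFileArchiver, and the root and file names are taken from the log purely as sample input.

```java
import java.nio.file.Path;

// Hypothetical sketch of the data -> archive path mapping visible in the log:
// keep the data-relative path, but re-root it under <root>/archive.
public class ArchivePathMapper {

    public static Path toArchivePath(Path root, Path storeFile) {
        Path relative = root.relativize(storeFile); // e.g. data/default/Table/region/B/file
        return root.resolve("archive").resolve(relative);
    }

    public static void main(String[] args) {
        Path root = Path.of("/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c");
        Path storeFile = root.resolve(
            "data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/424720a6f2bf429793ab330c9d5b5026");
        // Prints .../archive/data/default/TestAcidGuarantees/1c46d02b.../B/424720a6f2bf...
        System.out.println(toArchivePath(root, storeFile));
    }
}
```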
2024-12-04T15:24:00,836 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/424720a6f2bf429793ab330c9d5b5026 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/424720a6f2bf429793ab330c9d5b5026 2024-12-04T15:24:00,837 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/30dcfbc84bb6464388e74108c6883249 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/30dcfbc84bb6464388e74108c6883249 2024-12-04T15:24:00,838 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/bccb3dabeee24cc2bbf544439349d65a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/bccb3dabeee24cc2bbf544439349d65a 2024-12-04T15:24:00,839 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/229fc3f3fd0c4392b09ce68261f48237 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/229fc3f3fd0c4392b09ce68261f48237 2024-12-04T15:24:00,840 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/ae9d2811bc9c4bcea09db6f77aecd3d4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/ae9d2811bc9c4bcea09db6f77aecd3d4 2024-12-04T15:24:00,841 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/bf278c7ea3e2419691224a651a682ad0 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/bf278c7ea3e2419691224a651a682ad0 2024-12-04T15:24:00,843 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/8dc32097473e4a20a95aff467059942a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/8dc32097473e4a20a95aff467059942a 2024-12-04T15:24:00,844 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/80bbd91c1148461f97ac98664e5cca47 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/80bbd91c1148461f97ac98664e5cca47 2024-12-04T15:24:00,845 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/1917a55981f54404b2611056b1c2cfd3 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/1917a55981f54404b2611056b1c2cfd3 2024-12-04T15:24:00,846 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/b2e2e785fe364c3bad54099c0c0e669f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/b2e2e785fe364c3bad54099c0c0e669f 2024-12-04T15:24:00,847 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/fcd3617d70944a328a653930a1007b45 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/fcd3617d70944a328a653930a1007b45 2024-12-04T15:24:00,848 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/2727f7e8601843b78877e9dc27064073 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/2727f7e8601843b78877e9dc27064073 2024-12-04T15:24:00,850 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/a1712c60bae0483b824f59031ca5180c to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/a1712c60bae0483b824f59031ca5180c 2024-12-04T15:24:00,851 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/ff21cac52b964890abd35f1ea3c68468 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/ff21cac52b964890abd35f1ea3c68468 2024-12-04T15:24:00,854 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/1259db9c467444b280212fae0c892c05 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/1259db9c467444b280212fae0c892c05 2024-12-04T15:24:00,855 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/7e644bb115674cf391f0047d260c5713 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/7e644bb115674cf391f0047d260c5713 2024-12-04T15:24:00,856 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/dc7448663a4741769174325ea0b242a0 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/dc7448663a4741769174325ea0b242a0 2024-12-04T15:24:00,857 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/b9703be162984068a0270fb2be851720 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/b9703be162984068a0270fb2be851720 2024-12-04T15:24:00,858 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/fe2cffd7d3764430bdc1cad7518b6526 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/fe2cffd7d3764430bdc1cad7518b6526 2024-12-04T15:24:00,859 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/df8c437c786b48819e669eb5758084f7 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/df8c437c786b48819e669eb5758084f7 2024-12-04T15:24:00,861 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/1bf61aadca82468ca88bd3dfef4a593c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/1bf61aadca82468ca88bd3dfef4a593c 2024-12-04T15:24:00,862 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/3b815580182149e1b5b7ed452ae2214f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/3b815580182149e1b5b7ed452ae2214f 2024-12-04T15:24:00,863 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/d912034856764d518d17978605fb916c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/d912034856764d518d17978605fb916c 2024-12-04T15:24:00,864 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/a4c331cf792341a5b64621aa0387aa7d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/a4c331cf792341a5b64621aa0387aa7d 2024-12-04T15:24:00,880 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/d4af9878af03477682afaad6d6c4cfe5 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/d4af9878af03477682afaad6d6c4cfe5 2024-12-04T15:24:00,886 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/e1f3f172d8f0499f891b9bff29b18904 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/e1f3f172d8f0499f891b9bff29b18904 2024-12-04T15:24:00,891 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/9bda4189ae4c4ce6a04167eb31bbd388 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/9bda4189ae4c4ce6a04167eb31bbd388 2024-12-04T15:24:00,896 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/7808a8783ba5432ca9889120dfc084ed, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/57713693d879448a919e60e78ad9dfb2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/4f858110874a462d9cf1070a90feb174, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/c53b83434e8444bdb87c94f0e1022034, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/bd917a0195484716a512c575c09f3e6c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/0fbd26f4a1794c319fee3a5651ab0855, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/d4e05164b6414433a3ccb73da6ae7dcb, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/f752113224a94908a978058c3714b403, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/e786ea9140fb4f88bc1ffa2ed532f896, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/d980863a7b404f8a947baf0e7e2b90a8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/ad5bacbb79cd49e0809bf6828d44f929, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/a794cec60ad24328af81d62c3a0f0309, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/e5d477522f144b3581aa1fd01a37d28f, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/31dddba7cd47478f89c30b267e27908d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/479b9e4ea5534e748cc7f72f7df337c4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/ff1475c1c5a64d8aa313eb28cf02b1b6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/bbebe2aeff7e40c79c37c495b82a5dfd, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/5596662f92fc4876b52195b5c2e265f3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/05941c8956fd4ebd99a61849e9ebce87, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/42455b991e4644fc830b8856b8841c86, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/707e3667e0c34896bca49281ca7f4890, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/fd67b93bc2c0467dbdc75eac606c7422, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/0d8934ee0ec14aaea6aad6fbb8a256b3, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/330ccb1f88b848cdbe2c2061271077e9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/d2e52fcf27a54e6eb64b67c6f8b54de6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/4696c2ec2e4d443ab1ef66b7a7d2890f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/5fb62c3ef7c6441497f42dcd40418d42] to archive 2024-12-04T15:24:00,897 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-04T15:24:00,900 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/7808a8783ba5432ca9889120dfc084ed to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/7808a8783ba5432ca9889120dfc084ed 2024-12-04T15:24:00,903 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/57713693d879448a919e60e78ad9dfb2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/57713693d879448a919e60e78ad9dfb2 2024-12-04T15:24:00,904 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/4f858110874a462d9cf1070a90feb174 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/4f858110874a462d9cf1070a90feb174 2024-12-04T15:24:00,908 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/c53b83434e8444bdb87c94f0e1022034 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/c53b83434e8444bdb87c94f0e1022034 2024-12-04T15:24:00,910 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/bd917a0195484716a512c575c09f3e6c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/bd917a0195484716a512c575c09f3e6c 2024-12-04T15:24:00,911 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/0fbd26f4a1794c319fee3a5651ab0855 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/0fbd26f4a1794c319fee3a5651ab0855 2024-12-04T15:24:00,912 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/d4e05164b6414433a3ccb73da6ae7dcb to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/d4e05164b6414433a3ccb73da6ae7dcb 2024-12-04T15:24:00,913 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/f752113224a94908a978058c3714b403 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/f752113224a94908a978058c3714b403 2024-12-04T15:24:00,914 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/e786ea9140fb4f88bc1ffa2ed532f896 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/e786ea9140fb4f88bc1ffa2ed532f896 2024-12-04T15:24:00,916 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/d980863a7b404f8a947baf0e7e2b90a8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/d980863a7b404f8a947baf0e7e2b90a8 2024-12-04T15:24:00,917 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/ad5bacbb79cd49e0809bf6828d44f929 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/ad5bacbb79cd49e0809bf6828d44f929 2024-12-04T15:24:00,918 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/a794cec60ad24328af81d62c3a0f0309 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/a794cec60ad24328af81d62c3a0f0309 2024-12-04T15:24:00,919 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/e5d477522f144b3581aa1fd01a37d28f to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/e5d477522f144b3581aa1fd01a37d28f 2024-12-04T15:24:00,920 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/31dddba7cd47478f89c30b267e27908d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/31dddba7cd47478f89c30b267e27908d 2024-12-04T15:24:00,921 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/479b9e4ea5534e748cc7f72f7df337c4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/479b9e4ea5534e748cc7f72f7df337c4 2024-12-04T15:24:00,923 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/ff1475c1c5a64d8aa313eb28cf02b1b6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/ff1475c1c5a64d8aa313eb28cf02b1b6 2024-12-04T15:24:00,924 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/bbebe2aeff7e40c79c37c495b82a5dfd to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/bbebe2aeff7e40c79c37c495b82a5dfd 2024-12-04T15:24:00,925 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/5596662f92fc4876b52195b5c2e265f3 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/5596662f92fc4876b52195b5c2e265f3 2024-12-04T15:24:00,926 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/05941c8956fd4ebd99a61849e9ebce87 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/05941c8956fd4ebd99a61849e9ebce87 2024-12-04T15:24:00,927 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/42455b991e4644fc830b8856b8841c86 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/42455b991e4644fc830b8856b8841c86 2024-12-04T15:24:00,928 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/707e3667e0c34896bca49281ca7f4890 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/707e3667e0c34896bca49281ca7f4890 2024-12-04T15:24:00,930 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/fd67b93bc2c0467dbdc75eac606c7422 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/fd67b93bc2c0467dbdc75eac606c7422 2024-12-04T15:24:00,931 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/0d8934ee0ec14aaea6aad6fbb8a256b3 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/0d8934ee0ec14aaea6aad6fbb8a256b3 2024-12-04T15:24:00,932 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/330ccb1f88b848cdbe2c2061271077e9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/330ccb1f88b848cdbe2c2061271077e9 2024-12-04T15:24:00,933 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/d2e52fcf27a54e6eb64b67c6f8b54de6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/d2e52fcf27a54e6eb64b67c6f8b54de6 2024-12-04T15:24:00,934 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/4696c2ec2e4d443ab1ef66b7a7d2890f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/4696c2ec2e4d443ab1ef66b7a7d2890f 2024-12-04T15:24:00,935 DEBUG [StoreCloser-TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/5fb62c3ef7c6441497f42dcd40418d42 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/5fb62c3ef7c6441497f42dcd40418d42 2024-12-04T15:24:00,944 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/recovered.edits/399.seqid, newMaxSeqId=399, maxSeqId=1 2024-12-04T15:24:00,945 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5. 2024-12-04T15:24:00,945 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1635): Region close journal for 1c46d02b12b7c26e3e20a64bcdd3bec5: 2024-12-04T15:24:00,947 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(170): Closed 1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:24:00,947 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=1c46d02b12b7c26e3e20a64bcdd3bec5, regionState=CLOSED 2024-12-04T15:24:00,949 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-04T15:24:00,949 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; CloseRegionProcedure 1c46d02b12b7c26e3e20a64bcdd3bec5, server=645c2dbfef2e,42169,1733325683856 in 1.5750 sec 2024-12-04T15:24:00,950 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-12-04T15:24:00,950 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1c46d02b12b7c26e3e20a64bcdd3bec5, UNASSIGN in 1.5790 sec 2024-12-04T15:24:00,952 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-04T15:24:00,952 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5820 sec 2024-12-04T15:24:00,953 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325840953"}]},"ts":"1733325840953"} 2024-12-04T15:24:00,955 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 
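The HFileArchiver entries above all follow the same pattern: each compacted store file is moved from the region's directory under data/ to the identical relative location under archive/, so nothing about the namespace/table/region/family layout changes except the root. A minimal, purely illustrative sketch of that path mapping in plain Java follows; the class and method names are made up for this note, and the code is simple string handling, not HBase's actual HFileArchiver implementation.

// Illustrative only: reproduces the data/ -> archive/ mapping visible in the
// archiving entries above. Plain string handling, not HBase internals.
public final class ArchivePathSketch {
    /** Inserts "/archive" in front of the "/data/" segment, as the logged moves do. */
    static String toArchiveLocation(String storeFile) {
        int i = storeFile.indexOf("/data/default/");
        if (i < 0) {
            throw new IllegalArgumentException("not a table store file path: " + storeFile);
        }
        return storeFile.substring(0, i) + "/archive" + storeFile.substring(i);
    }

    public static void main(String[] args) {
        // Source path taken verbatim from one of the entries above.
        String src = "hdfs://localhost:38975/user/jenkins/test-data/"
            + "2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/"
            + "1c46d02b12b7c26e3e20a64bcdd3bec5/C/7808a8783ba5432ca9889120dfc084ed";
        // Prints the matching .../archive/data/default/... destination shown in the log.
        System.out.println(toArchiveLocation(src));
    }
}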
2024-12-04T15:24:00,958 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-04T15:24:00,959 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6030 sec 2024-12-04T15:24:01,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-04T15:24:01,468 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-12-04T15:24:01,469 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-04T15:24:01,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:24:01,471 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=144, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:24:01,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-04T15:24:01,471 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=144, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:24:01,473 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:24:01,475 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/recovered.edits] 2024-12-04T15:24:01,478 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/38d96624306d46fa880b7d23d3edbcc7 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/38d96624306d46fa880b7d23d3edbcc7 2024-12-04T15:24:01,480 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/f7048a0fd8df48edb0ac5ed04b1271e8 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/A/f7048a0fd8df48edb0ac5ed04b1271e8 2024-12-04T15:24:01,482 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/006705bd8f76475483408e5991ba3cfa to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/006705bd8f76475483408e5991ba3cfa 2024-12-04T15:24:01,484 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/854521c185d44fdb9c057daefc80271d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/B/854521c185d44fdb9c057daefc80271d 2024-12-04T15:24:01,486 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/ca419c7b0bb4417eba1ad05e239e473e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/ca419c7b0bb4417eba1ad05e239e473e 2024-12-04T15:24:01,487 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/fc9f02eebdef4eeaa72fe121997c5959 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/C/fc9f02eebdef4eeaa72fe121997c5959 2024-12-04T15:24:01,491 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/recovered.edits/399.seqid to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5/recovered.edits/399.seqid 2024-12-04T15:24:01,491 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/1c46d02b12b7c26e3e20a64bcdd3bec5 2024-12-04T15:24:01,492 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-04T15:24:01,494 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=144, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:24:01,496 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-04T15:24:01,500 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
2024-12-04T15:24:01,501 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=144, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:24:01,501 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-04T15:24:01,501 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733325841501"}]},"ts":"9223372036854775807"} 2024-12-04T15:24:01,505 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-04T15:24:01,505 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 1c46d02b12b7c26e3e20a64bcdd3bec5, NAME => 'TestAcidGuarantees,,1733325808851.1c46d02b12b7c26e3e20a64bcdd3bec5.', STARTKEY => '', ENDKEY => ''}] 2024-12-04T15:24:01,505 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-04T15:24:01,506 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733325841505"}]},"ts":"9223372036854775807"} 2024-12-04T15:24:01,507 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-04T15:24:01,510 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=144, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:24:01,511 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 41 msec 2024-12-04T15:24:01,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-04T15:24:01,573 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-12-04T15:24:01,586 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=237 (was 237), OpenFileDescriptor=447 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=975 (was 783) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1917 (was 3793) 2024-12-04T15:24:01,599 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=237, OpenFileDescriptor=447, MaxFileDescriptor=1048576, SystemLoadAverage=975, ProcessCount=11, AvailableMemoryMB=1917 2024-12-04T15:24:01,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
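Seen from the client side, the DisableTableProcedure (procId 140) and DeleteTableProcedure (procId 144) logged above amount to two Admin calls made before the table is re-created for the next test case. A hedged sketch using the standard HBase 2.x Admin API is given below; the class and method names are invented for illustration, and the test harness itself drives this through HBaseAdmin and may add its own checks.

// Hedged sketch of the client calls matching the logged disable/delete sequence.
// Standard HBase 2.x client API; not copied from the test source.
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class DropTableSketch {
    static void dropIfExists(Admin admin, String table) throws IOException {
        TableName name = TableName.valueOf(table);
        if (!admin.tableExists(name)) {
            return; // nothing to drop
        }
        if (admin.isTableEnabled(name)) {
            admin.disableTable(name); // logged as "Operation: DISABLE ... procId: 140 completed"
        }
        admin.deleteTable(name);      // logged as "Operation: DELETE ... procId: 144 completed"
    }
}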
2024-12-04T15:24:01,601 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T15:24:01,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=145, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-04T15:24:01,603 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T15:24:01,603 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:01,604 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 145 2024-12-04T15:24:01,604 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T15:24:01,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-04T15:24:01,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742451_1627 (size=963) 2024-12-04T15:24:01,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-04T15:24:01,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-04T15:24:02,020 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c 2024-12-04T15:24:02,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742452_1628 (size=53) 2024-12-04T15:24:02,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-04T15:24:02,431 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:24:02,431 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 5345b80b290e2620248a8fde2595e371, disabling compactions & flushes 2024-12-04T15:24:02,431 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:02,431 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:02,431 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. after waiting 0 ms 2024-12-04T15:24:02,431 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:02,431 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
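The CreateTableProcedure entries above spell out the descriptor the test requests: three column families A, B and C with VERSIONS => '1', plus the table attribute hbase.hregion.compacting.memstore.type = ADAPTIVE that later makes each store open with a CompactingMemStore. A hedged sketch of building an equivalent descriptor with the standard HBase 2.x client API follows; the class name is invented for illustration and the actual test code may construct its descriptor differently.

// Hedged sketch: a descriptor comparable to the one logged by CreateTableProcedure
// (families A/B/C, max versions 1, ADAPTIVE in-memory compaction).
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateAcidTableSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableDescriptorBuilder table =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    // Table attribute visible in the logged descriptor.
                    .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
            for (String family : new String[] {"A", "B", "C"}) {
                table.setColumnFamily(
                    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                        .build());
            }
            admin.createTable(table.build());
        }
    }
}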
2024-12-04T15:24:02,431 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:02,433 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T15:24:02,433 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733325842433"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733325842433"}]},"ts":"1733325842433"} 2024-12-04T15:24:02,434 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-04T15:24:02,434 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T15:24:02,435 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325842435"}]},"ts":"1733325842435"} 2024-12-04T15:24:02,435 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-04T15:24:02,440 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5345b80b290e2620248a8fde2595e371, ASSIGN}] 2024-12-04T15:24:02,448 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5345b80b290e2620248a8fde2595e371, ASSIGN 2024-12-04T15:24:02,449 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=5345b80b290e2620248a8fde2595e371, ASSIGN; state=OFFLINE, location=645c2dbfef2e,42169,1733325683856; forceNewPlan=false, retain=false 2024-12-04T15:24:02,599 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=5345b80b290e2620248a8fde2595e371, regionState=OPENING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:02,601 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; OpenRegionProcedure 5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:24:02,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-04T15:24:02,753 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:02,756 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:02,756 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7285): Opening region: {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:24:02,757 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:02,757 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:24:02,757 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7327): checking encryption for 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:02,757 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7330): checking classloading for 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:02,758 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:02,760 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:24:02,760 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5345b80b290e2620248a8fde2595e371 columnFamilyName A 2024-12-04T15:24:02,760 DEBUG [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:02,761 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.HStore(327): Store=5345b80b290e2620248a8fde2595e371/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:24:02,761 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:02,762 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:24:02,762 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5345b80b290e2620248a8fde2595e371 columnFamilyName B 2024-12-04T15:24:02,762 DEBUG [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:02,763 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.HStore(327): Store=5345b80b290e2620248a8fde2595e371/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:24:02,763 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:02,764 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:24:02,764 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5345b80b290e2620248a8fde2595e371 columnFamilyName C 2024-12-04T15:24:02,764 DEBUG [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:02,765 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.HStore(327): Store=5345b80b290e2620248a8fde2595e371/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:24:02,765 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:02,766 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:02,766 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:02,768 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T15:24:02,769 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1085): writing seq id for 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:02,770 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T15:24:02,771 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1102): Opened 5345b80b290e2620248a8fde2595e371; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74641901, jitterRate=0.11225099861621857}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T15:24:02,771 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1001): Region open journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:02,772 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., pid=147, masterSystemTime=1733325842752 2024-12-04T15:24:02,773 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:02,773 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:02,774 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=5345b80b290e2620248a8fde2595e371, regionState=OPEN, openSeqNum=2, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:02,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-04T15:24:02,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; OpenRegionProcedure 5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 in 173 msec 2024-12-04T15:24:02,777 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-12-04T15:24:02,777 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5345b80b290e2620248a8fde2595e371, ASSIGN in 335 msec 2024-12-04T15:24:02,778 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T15:24:02,778 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325842778"}]},"ts":"1733325842778"} 2024-12-04T15:24:02,779 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-04T15:24:02,783 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T15:24:02,784 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1820 sec 2024-12-04T15:24:03,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-04T15:24:03,710 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 145 completed 2024-12-04T15:24:03,711 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53e238d3 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4e514c9f 2024-12-04T15:24:03,724 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c554741, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:24:03,726 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:24:03,728 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40314, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:24:03,729 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T15:24:03,732 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48512, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T15:24:03,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-04T15:24:03,734 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T15:24:03,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-04T15:24:03,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742453_1629 (size=999) 2024-12-04T15:24:04,166 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-04T15:24:04,166 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-04T15:24:04,169 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-04T15:24:04,171 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5345b80b290e2620248a8fde2595e371, REOPEN/MOVE}] 2024-12-04T15:24:04,172 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5345b80b290e2620248a8fde2595e371, REOPEN/MOVE 2024-12-04T15:24:04,173 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=5345b80b290e2620248a8fde2595e371, regionState=CLOSING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:04,174 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-04T15:24:04,174 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; CloseRegionProcedure 5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:24:04,325 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:04,326 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(124): Close 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:04,326 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-04T15:24:04,326 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1681): Closing 5345b80b290e2620248a8fde2595e371, disabling compactions & flushes 2024-12-04T15:24:04,326 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:04,326 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:04,326 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. after waiting 0 ms 2024-12-04T15:24:04,326 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:04,331 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-04T15:24:04,331 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:04,331 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1635): Region close journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:04,331 WARN [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionServer(3786): Not adding moved region record: 5345b80b290e2620248a8fde2595e371 to self. 2024-12-04T15:24:04,333 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(170): Closed 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:04,333 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=5345b80b290e2620248a8fde2595e371, regionState=CLOSED 2024-12-04T15:24:04,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-12-04T15:24:04,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; CloseRegionProcedure 5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 in 160 msec 2024-12-04T15:24:04,342 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=5345b80b290e2620248a8fde2595e371, REOPEN/MOVE; state=CLOSED, location=645c2dbfef2e,42169,1733325683856; forceNewPlan=false, retain=true 2024-12-04T15:24:04,493 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=5345b80b290e2620248a8fde2595e371, regionState=OPENING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:04,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=150, state=RUNNABLE; OpenRegionProcedure 5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:24:04,645 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:04,648 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:04,648 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7285): Opening region: {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:24:04,649 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:04,649 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:24:04,649 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7327): checking encryption for 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:04,649 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7330): checking classloading for 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:04,651 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:04,651 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:24:04,652 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5345b80b290e2620248a8fde2595e371 columnFamilyName A 2024-12-04T15:24:04,653 DEBUG [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:04,653 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.HStore(327): Store=5345b80b290e2620248a8fde2595e371/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:24:04,654 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:04,655 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:24:04,655 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5345b80b290e2620248a8fde2595e371 columnFamilyName B 2024-12-04T15:24:04,655 DEBUG [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:04,656 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.HStore(327): Store=5345b80b290e2620248a8fde2595e371/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:24:04,656 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:04,656 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-04T15:24:04,657 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5345b80b290e2620248a8fde2595e371 columnFamilyName C 2024-12-04T15:24:04,657 DEBUG [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:04,657 INFO [StoreOpener-5345b80b290e2620248a8fde2595e371-1 {}] regionserver.HStore(327): Store=5345b80b290e2620248a8fde2595e371/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:24:04,657 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:04,658 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:04,659 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:04,660 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T15:24:04,662 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1085): writing seq id for 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:04,664 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1102): Opened 5345b80b290e2620248a8fde2595e371; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67341502, jitterRate=0.003466576337814331}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T15:24:04,665 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1001): Region open journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:04,665 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., pid=152, masterSystemTime=1733325844645 2024-12-04T15:24:04,667 DEBUG [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:04,667 INFO [RS_OPEN_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:04,667 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=5345b80b290e2620248a8fde2595e371, regionState=OPEN, openSeqNum=5, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:04,669 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=150 2024-12-04T15:24:04,670 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=150, state=SUCCESS; OpenRegionProcedure 5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 in 174 msec 2024-12-04T15:24:04,671 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-12-04T15:24:04,671 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5345b80b290e2620248a8fde2595e371, REOPEN/MOVE in 499 msec 2024-12-04T15:24:04,673 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-12-04T15:24:04,673 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 504 msec 2024-12-04T15:24:04,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 939 msec 2024-12-04T15:24:04,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-04T15:24:04,678 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x401484c4 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4650ded 2024-12-04T15:24:04,681 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72fc2d6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:24:04,682 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04cd1462 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@28eb6b73 2024-12-04T15:24:04,694 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b6fbc54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:24:04,695 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6b3a2c2f to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@187972c8 2024-12-04T15:24:04,702 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e32e08f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:24:04,703 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x211bcc55 
to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7227158c 2024-12-04T15:24:04,710 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b58a5df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:24:04,710 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x096b11d1 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@432afe05 2024-12-04T15:24:04,716 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d22dde2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:24:04,717 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x663aa62c to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ddcbac4 2024-12-04T15:24:04,723 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@685d43aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:24:04,724 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0c6369bd to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2441e86f 2024-12-04T15:24:04,732 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2603af5e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:24:04,733 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x531e99d9 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78b70145 2024-12-04T15:24:04,742 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43753852, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:24:04,742 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x192bd4a8 to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@59d643e1 2024-12-04T15:24:04,753 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@338b5721, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:24:04,754 DEBUG [Time-limited 
test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2855176e to 127.0.0.1:55739 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@437780ce 2024-12-04T15:24:04,759 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2aace5de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:24:04,780 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:24:04,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees 2024-12-04T15:24:04,782 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:24:04,783 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:24:04,783 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:24:04,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-04T15:24:04,790 DEBUG [hconnection-0x2692ad3b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:24:04,791 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40320, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:24:04,796 DEBUG [hconnection-0x1bb3dbe7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:24:04,797 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40322, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:24:04,800 DEBUG [hconnection-0x5fbebee6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:24:04,801 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40330, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:24:04,804 DEBUG [hconnection-0x7306aa7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:24:04,805 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40346, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:24:04,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] 
regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:04,808 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-04T15:24:04,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:04,808 DEBUG [hconnection-0x79cc324f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:24:04,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:04,808 DEBUG [hconnection-0x12840265-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:24:04,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:04,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:04,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:04,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:04,812 DEBUG [hconnection-0x14f3e774-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:24:04,815 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40354, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:24:04,816 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40360, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:24:04,816 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40358, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:24:04,828 DEBUG [hconnection-0x69c10bd8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:24:04,829 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40370, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:24:04,836 DEBUG [hconnection-0x365989da-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:24:04,837 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40376, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:24:04,840 DEBUG [hconnection-0x5fbf94-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:24:04,843 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40386, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:24:04,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:04,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325904848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:04,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:04,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325904849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:04,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:04,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325904853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:04,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:04,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1733325904868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:04,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:04,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40320 deadline: 1733325904868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:04,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-04T15:24:04,921 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204ef8eaf9bb2f64191bab5a03a935e4084_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325844804/Put/seqid=0 2024-12-04T15:24:04,940 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:04,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-04T15:24:04,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:04,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:04,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:04,941 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:04,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:04,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:04,956 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:04,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325904954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:04,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:04,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325904957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:04,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:04,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325904962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:04,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:04,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40320 deadline: 1733325904972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:04,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:04,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1733325904971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:04,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742454_1630 (size=12154) 2024-12-04T15:24:05,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-04T15:24:05,095 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,096 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-04T15:24:05,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:05,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:05,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:05,097 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:05,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:05,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:05,165 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:05,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325905163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:05,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325905163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:05,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325905169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:05,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40320 deadline: 1733325905180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:05,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1733325905189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,264 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,268 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-04T15:24:05,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:05,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:05,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:05,268 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:05,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:05,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:05,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-04T15:24:05,400 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:05,410 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204ef8eaf9bb2f64191bab5a03a935e4084_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204ef8eaf9bb2f64191bab5a03a935e4084_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:05,411 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/ea0fbf495d4e4de3bc14c8bb2e2d0490, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:05,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/ea0fbf495d4e4de3bc14c8bb2e2d0490 is 175, key is test_row_0/A:col10/1733325844804/Put/seqid=0 2024-12-04T15:24:05,440 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-04T15:24:05,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:05,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:05,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:05,441 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:05,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:05,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:05,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742455_1631 (size=30955) 2024-12-04T15:24:05,456 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/ea0fbf495d4e4de3bc14c8bb2e2d0490 2024-12-04T15:24:05,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:05,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325905472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:05,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325905473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:05,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325905480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:05,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40320 deadline: 1733325905492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:05,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1733325905500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/1ed521b7cc534f6ba02405c688cd423d is 50, key is test_row_0/B:col10/1733325844804/Put/seqid=0 2024-12-04T15:24:05,620 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-04T15:24:05,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:05,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:05,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:05,624 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:05,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:05,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:05,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742456_1632 (size=12001) 2024-12-04T15:24:05,796 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-04T15:24:05,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:05,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:05,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:05,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:05,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:05,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:05,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-04T15:24:05,968 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-04T15:24:05,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:05,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:05,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:05,969 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:05,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:05,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:05,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:05,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325905979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:05,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325905979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:05,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:05,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325905988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:06,007 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:06,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40320 deadline: 1733325906005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:06,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:06,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1733325906010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:06,040 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/1ed521b7cc534f6ba02405c688cd423d 2024-12-04T15:24:06,121 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:06,122 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-04T15:24:06,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:06,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:06,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:06,122 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:06,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:06,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:06,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/34149d29674e4bd29ae70c3184a62522 is 50, key is test_row_0/C:col10/1733325844804/Put/seqid=0 2024-12-04T15:24:06,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742457_1633 (size=12001) 2024-12-04T15:24:06,281 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:06,292 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-04T15:24:06,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:06,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:06,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:06,292 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:06,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:06,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:06,460 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:06,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-04T15:24:06,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:06,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:06,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:06,461 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:06,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:06,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:06,521 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-04T15:24:06,618 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:06,618 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/34149d29674e4bd29ae70c3184a62522 2024-12-04T15:24:06,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-04T15:24:06,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:06,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:06,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:06,624 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:06,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:06,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:06,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/ea0fbf495d4e4de3bc14c8bb2e2d0490 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/ea0fbf495d4e4de3bc14c8bb2e2d0490 2024-12-04T15:24:06,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/ea0fbf495d4e4de3bc14c8bb2e2d0490, entries=150, sequenceid=15, filesize=30.2 K 2024-12-04T15:24:06,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/1ed521b7cc534f6ba02405c688cd423d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/1ed521b7cc534f6ba02405c688cd423d 2024-12-04T15:24:06,657 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/1ed521b7cc534f6ba02405c688cd423d, entries=150, sequenceid=15, filesize=11.7 K 2024-12-04T15:24:06,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/34149d29674e4bd29ae70c3184a62522 as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/34149d29674e4bd29ae70c3184a62522 2024-12-04T15:24:06,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/34149d29674e4bd29ae70c3184a62522, entries=150, sequenceid=15, filesize=11.7 K 2024-12-04T15:24:06,664 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 5345b80b290e2620248a8fde2595e371 in 1857ms, sequenceid=15, compaction requested=false 2024-12-04T15:24:06,665 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-04T15:24:06,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:06,784 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:06,784 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-04T15:24:06,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:06,785 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-04T15:24:06,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:06,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:06,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:06,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:06,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:06,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:06,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204fa61343e0307448ab59b599b389878fe_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325844848/Put/seqid=0 2024-12-04T15:24:06,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742458_1634 (size=12154) 2024-12-04T15:24:06,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:06,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-04T15:24:06,901 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204fa61343e0307448ab59b599b389878fe_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204fa61343e0307448ab59b599b389878fe_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:06,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/6565a561d65c41e7a598ea5b808592ad, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:06,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/6565a561d65c41e7a598ea5b808592ad is 175, key is test_row_0/A:col10/1733325844848/Put/seqid=0 2024-12-04T15:24:06,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742459_1635 (size=30955) 2024-12-04T15:24:07,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:07,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:07,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:07,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40320 deadline: 1733325907022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:07,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:07,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325907029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:07,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:07,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325907031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:07,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:07,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325907025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:07,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:07,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1733325907033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:07,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:07,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325907144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:07,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:07,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325907148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:07,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:07,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325907151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:07,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:07,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325907346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:07,360 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:07,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325907358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:07,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:07,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325907367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:07,388 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/6565a561d65c41e7a598ea5b808592ad 2024-12-04T15:24:07,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/e0ee9826896c47b3ac679e55358530f4 is 50, key is test_row_0/B:col10/1733325844848/Put/seqid=0 2024-12-04T15:24:07,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742460_1636 (size=12001) 2024-12-04T15:24:07,480 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/e0ee9826896c47b3ac679e55358530f4 2024-12-04T15:24:07,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/4e04687bd5284ad494533c9e803ae182 is 50, key is test_row_0/C:col10/1733325844848/Put/seqid=0 2024-12-04T15:24:07,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742461_1637 (size=12001) 2024-12-04T15:24:07,552 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/4e04687bd5284ad494533c9e803ae182 2024-12-04T15:24:07,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/6565a561d65c41e7a598ea5b808592ad as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/6565a561d65c41e7a598ea5b808592ad 2024-12-04T15:24:07,594 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/6565a561d65c41e7a598ea5b808592ad, entries=150, sequenceid=40, filesize=30.2 K 2024-12-04T15:24:07,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/e0ee9826896c47b3ac679e55358530f4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/e0ee9826896c47b3ac679e55358530f4 2024-12-04T15:24:07,640 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/e0ee9826896c47b3ac679e55358530f4, entries=150, sequenceid=40, filesize=11.7 K 2024-12-04T15:24:07,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/4e04687bd5284ad494533c9e803ae182 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/4e04687bd5284ad494533c9e803ae182 2024-12-04T15:24:07,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:07,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325907658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:07,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:07,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325907668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:07,674 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/4e04687bd5284ad494533c9e803ae182, entries=150, sequenceid=40, filesize=11.7 K 2024-12-04T15:24:07,680 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 5345b80b290e2620248a8fde2595e371 in 895ms, sequenceid=40, compaction requested=false 2024-12-04T15:24:07,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:07,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:07,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-12-04T15:24:07,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-12-04T15:24:07,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:07,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-04T15:24:07,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:07,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:07,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:07,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:07,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:07,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:07,696 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-12-04T15:24:07,696 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.9070 sec 2024-12-04T15:24:07,698 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees in 2.9170 sec 2024-12-04T15:24:07,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412041ab89625e3cb4562a7502cf5bf3eee25_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325847024/Put/seqid=0 2024-12-04T15:24:07,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742462_1638 (size=12154) 2024-12-04T15:24:07,754 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:07,759 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412041ab89625e3cb4562a7502cf5bf3eee25_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412041ab89625e3cb4562a7502cf5bf3eee25_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:07,761 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/0fc6c959dde7415c8f8ec11921b1ebd4, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:07,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/0fc6c959dde7415c8f8ec11921b1ebd4 is 175, key is test_row_0/A:col10/1733325847024/Put/seqid=0 2024-12-04T15:24:07,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742463_1639 (size=30955) 2024-12-04T15:24:07,817 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/0fc6c959dde7415c8f8ec11921b1ebd4 2024-12-04T15:24:07,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/73103e0d0230457d9feebe81df4f7fa6 is 50, key is test_row_0/B:col10/1733325847024/Put/seqid=0 2024-12-04T15:24:07,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:07,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325907877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:07,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742464_1640 (size=12001) 2024-12-04T15:24:07,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:07,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325907983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:08,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:08,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325908172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:08,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:08,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325908177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:08,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:08,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325908191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:08,368 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/73103e0d0230457d9feebe81df4f7fa6 2024-12-04T15:24:08,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/40c4045f081542b5aeeef7de46b8afed is 50, key is test_row_0/C:col10/1733325847024/Put/seqid=0 2024-12-04T15:24:08,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:08,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325908495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:08,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742465_1641 (size=12001) 2024-12-04T15:24:08,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-04T15:24:08,901 INFO [Thread-2765 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 153 completed 2024-12-04T15:24:08,904 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/40c4045f081542b5aeeef7de46b8afed 2024-12-04T15:24:08,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:24:08,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees 2024-12-04T15:24:08,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-04T15:24:08,910 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:24:08,914 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:24:08,914 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:24:08,978 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/0fc6c959dde7415c8f8ec11921b1ebd4 as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/0fc6c959dde7415c8f8ec11921b1ebd4 2024-12-04T15:24:09,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:09,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325908998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:09,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-04T15:24:09,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/0fc6c959dde7415c8f8ec11921b1ebd4, entries=150, sequenceid=53, filesize=30.2 K 2024-12-04T15:24:09,048 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/73103e0d0230457d9feebe81df4f7fa6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/73103e0d0230457d9feebe81df4f7fa6 2024-12-04T15:24:09,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:09,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40320 deadline: 1733325909047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:09,052 DEBUG [Thread-2757 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4183 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., hostname=645c2dbfef2e,42169,1733325683856, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:24:09,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:09,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1733325909051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:09,060 DEBUG [Thread-2755 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4192 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., hostname=645c2dbfef2e,42169,1733325683856, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:24:09,075 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:09,075 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-04T15:24:09,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:09,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:09,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:09,079 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:09,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:09,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:09,117 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/73103e0d0230457d9feebe81df4f7fa6, entries=150, sequenceid=53, filesize=11.7 K 2024-12-04T15:24:09,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/40c4045f081542b5aeeef7de46b8afed as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/40c4045f081542b5aeeef7de46b8afed 2024-12-04T15:24:09,145 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/40c4045f081542b5aeeef7de46b8afed, entries=150, sequenceid=53, filesize=11.7 K 2024-12-04T15:24:09,148 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 5345b80b290e2620248a8fde2595e371 in 1456ms, sequenceid=53, compaction requested=true 2024-12-04T15:24:09,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:09,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:24:09,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:24:09,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:24:09,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:24:09,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:24:09,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-04T15:24:09,152 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:24:09,152 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:24:09,164 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:24:09,164 DEBUG 
[RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/A is initiating minor compaction (all files) 2024-12-04T15:24:09,164 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/A in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:09,164 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/ea0fbf495d4e4de3bc14c8bb2e2d0490, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/6565a561d65c41e7a598ea5b808592ad, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/0fc6c959dde7415c8f8ec11921b1ebd4] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=90.7 K 2024-12-04T15:24:09,164 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:09,164 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/ea0fbf495d4e4de3bc14c8bb2e2d0490, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/6565a561d65c41e7a598ea5b808592ad, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/0fc6c959dde7415c8f8ec11921b1ebd4] 2024-12-04T15:24:09,168 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:24:09,168 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/C is initiating minor compaction (all files) 2024-12-04T15:24:09,168 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/C in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:09,168 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/34149d29674e4bd29ae70c3184a62522, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/4e04687bd5284ad494533c9e803ae182, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/40c4045f081542b5aeeef7de46b8afed] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=35.2 K 2024-12-04T15:24:09,172 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea0fbf495d4e4de3bc14c8bb2e2d0490, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733325844798 2024-12-04T15:24:09,172 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 34149d29674e4bd29ae70c3184a62522, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733325844798 2024-12-04T15:24:09,173 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e04687bd5284ad494533c9e803ae182, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733325844832 2024-12-04T15:24:09,173 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6565a561d65c41e7a598ea5b808592ad, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733325844832 2024-12-04T15:24:09,180 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 40c4045f081542b5aeeef7de46b8afed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733325847018 2024-12-04T15:24:09,182 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0fc6c959dde7415c8f8ec11921b1ebd4, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733325847018 2024-12-04T15:24:09,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:09,186 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-04T15:24:09,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:09,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:09,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:09,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:09,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 
2024-12-04T15:24:09,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:09,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-04T15:24:09,227 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412044c9e4817b5b343a7b7ffac364cb1a453_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325847870/Put/seqid=0 2024-12-04T15:24:09,234 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#C#compaction#550 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:09,235 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/20afed4e64104bc9a278501b21ebb97d is 50, key is test_row_0/C:col10/1733325847024/Put/seqid=0 2024-12-04T15:24:09,238 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:09,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-04T15:24:09,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:09,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:09,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:09,239 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:09,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:09,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:09,252 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:09,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742466_1642 (size=12154) 2024-12-04T15:24:09,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:09,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325909259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:09,265 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:09,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:09,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325909265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:09,278 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412044c9e4817b5b343a7b7ffac364cb1a453_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412044c9e4817b5b343a7b7ffac364cb1a453_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:09,278 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/3c83100b07f44b0f8489434b07b779ba, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:09,279 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/3c83100b07f44b0f8489434b07b779ba is 175, key is test_row_0/A:col10/1733325847870/Put/seqid=0 2024-12-04T15:24:09,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742467_1643 (size=12104) 2024-12-04T15:24:09,292 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241204c2556da5560c45e4b8cea19a81ba0aff_5345b80b290e2620248a8fde2595e371 store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:09,294 
DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241204c2556da5560c45e4b8cea19a81ba0aff_5345b80b290e2620248a8fde2595e371, store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:09,294 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204c2556da5560c45e4b8cea19a81ba0aff_5345b80b290e2620248a8fde2595e371 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:09,332 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/20afed4e64104bc9a278501b21ebb97d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/20afed4e64104bc9a278501b21ebb97d 2024-12-04T15:24:09,348 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5345b80b290e2620248a8fde2595e371/C of 5345b80b290e2620248a8fde2595e371 into 20afed4e64104bc9a278501b21ebb97d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:24:09,348 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:09,348 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/C, priority=13, startTime=1733325849148; duration=0sec 2024-12-04T15:24:09,348 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:24:09,348 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:C 2024-12-04T15:24:09,348 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:24:09,349 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:24:09,349 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/B is initiating minor compaction (all files) 2024-12-04T15:24:09,349 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/B in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:09,349 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/1ed521b7cc534f6ba02405c688cd423d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/e0ee9826896c47b3ac679e55358530f4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/73103e0d0230457d9feebe81df4f7fa6] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=35.2 K 2024-12-04T15:24:09,350 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ed521b7cc534f6ba02405c688cd423d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733325844798 2024-12-04T15:24:09,350 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting e0ee9826896c47b3ac679e55358530f4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733325844832 2024-12-04T15:24:09,351 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 73103e0d0230457d9feebe81df4f7fa6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733325847018 2024-12-04T15:24:09,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742469_1645 (size=4469) 2024-12-04T15:24:09,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742468_1644 (size=30955) 2024-12-04T15:24:09,368 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#B#compaction#552 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:09,368 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/9e0b93f11feb4ff6a6ae35c6b4177786 is 50, key is test_row_0/B:col10/1733325847024/Put/seqid=0 2024-12-04T15:24:09,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:09,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325909366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:09,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:09,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325909376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:09,392 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:09,393 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-04T15:24:09,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:09,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:09,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:09,396 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:09,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:09,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:09,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742470_1646 (size=12104) 2024-12-04T15:24:09,414 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/9e0b93f11feb4ff6a6ae35c6b4177786 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9e0b93f11feb4ff6a6ae35c6b4177786 2024-12-04T15:24:09,420 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5345b80b290e2620248a8fde2595e371/B of 5345b80b290e2620248a8fde2595e371 into 9e0b93f11feb4ff6a6ae35c6b4177786(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:24:09,420 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:09,420 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/B, priority=13, startTime=1733325849148; duration=0sec 2024-12-04T15:24:09,421 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:09,421 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:B 2024-12-04T15:24:09,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-04T15:24:09,549 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:09,559 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-04T15:24:09,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:09,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:09,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:09,559 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:09,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:09,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:09,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:09,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325909584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:09,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:09,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325909588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:09,723 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:09,740 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-04T15:24:09,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:09,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:09,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:09,740 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:09,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:09,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:09,752 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#A#compaction#551 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:09,752 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/3c83100b07f44b0f8489434b07b779ba 2024-12-04T15:24:09,753 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/98223fe4747b4eebaf5e615fac66832a is 175, key is test_row_0/A:col10/1733325847024/Put/seqid=0 2024-12-04T15:24:09,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742471_1647 (size=31058) 2024-12-04T15:24:09,778 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/9246ae95f3334fd1abdbedd6a9c12aa0 is 50, key is test_row_0/B:col10/1733325847870/Put/seqid=0 2024-12-04T15:24:09,798 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/98223fe4747b4eebaf5e615fac66832a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/98223fe4747b4eebaf5e615fac66832a 2024-12-04T15:24:09,805 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5345b80b290e2620248a8fde2595e371/A of 5345b80b290e2620248a8fde2595e371 into 98223fe4747b4eebaf5e615fac66832a(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:24:09,805 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:09,805 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/A, priority=13, startTime=1733325849148; duration=0sec 2024-12-04T15:24:09,806 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:09,806 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:A 2024-12-04T15:24:09,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742472_1648 (size=12001) 2024-12-04T15:24:09,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:09,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325909896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:09,900 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:09,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-04T15:24:09,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:09,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:09,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:09,908 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:09,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:09,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:09,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:09,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325909912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:10,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-04T15:24:10,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:10,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325910023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:10,063 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:10,063 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-04T15:24:10,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:10,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:10,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:10,064 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:10,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:10,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:10,216 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:10,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-04T15:24:10,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:10,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:10,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:10,220 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:10,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:10,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:10,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/9246ae95f3334fd1abdbedd6a9c12aa0 2024-12-04T15:24:10,290 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/293ae11f8eef447d9ac9e5438d7ea94e is 50, key is test_row_0/C:col10/1733325847870/Put/seqid=0 2024-12-04T15:24:10,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742473_1649 (size=12001) 2024-12-04T15:24:10,355 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/293ae11f8eef447d9ac9e5438d7ea94e 2024-12-04T15:24:10,377 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:10,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-04T15:24:10,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:10,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:10,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:10,378 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:10,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:10,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:10,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/3c83100b07f44b0f8489434b07b779ba as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/3c83100b07f44b0f8489434b07b779ba 2024-12-04T15:24:10,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:10,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325910405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:10,411 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/3c83100b07f44b0f8489434b07b779ba, entries=150, sequenceid=77, filesize=30.2 K 2024-12-04T15:24:10,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/9246ae95f3334fd1abdbedd6a9c12aa0 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9246ae95f3334fd1abdbedd6a9c12aa0 2024-12-04T15:24:10,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:10,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325910422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:10,450 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9246ae95f3334fd1abdbedd6a9c12aa0, entries=150, sequenceid=77, filesize=11.7 K 2024-12-04T15:24:10,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/293ae11f8eef447d9ac9e5438d7ea94e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/293ae11f8eef447d9ac9e5438d7ea94e 2024-12-04T15:24:10,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/293ae11f8eef447d9ac9e5438d7ea94e, entries=150, sequenceid=77, filesize=11.7 K 2024-12-04T15:24:10,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 5345b80b290e2620248a8fde2595e371 in 1279ms, sequenceid=77, compaction requested=false 2024-12-04T15:24:10,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:10,534 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:10,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-04T15:24:10,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:10,540 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-04T15:24:10,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:10,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:10,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:10,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:10,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:10,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:10,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204b2321b8effd34d95b18ec6d314f2d5c2_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325849255/Put/seqid=0 2024-12-04T15:24:10,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742474_1650 (size=12154) 2024-12-04T15:24:11,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-04T15:24:11,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:11,084 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204b2321b8effd34d95b18ec6d314f2d5c2_5345b80b290e2620248a8fde2595e371 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204b2321b8effd34d95b18ec6d314f2d5c2_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:11,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/2598bd88ef154b49b8b86c9fca024a0a, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:11,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/2598bd88ef154b49b8b86c9fca024a0a is 175, key is test_row_0/A:col10/1733325849255/Put/seqid=0 2024-12-04T15:24:11,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742475_1651 (size=30955) 2024-12-04T15:24:11,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:11,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:11,470 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:11,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325911468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:11,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:11,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325911470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:11,533 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/2598bd88ef154b49b8b86c9fca024a0a 2024-12-04T15:24:11,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/eb54a477650d451bb8449f0ef5c42cd6 is 50, key is test_row_0/B:col10/1733325849255/Put/seqid=0 2024-12-04T15:24:11,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742476_1652 (size=12001) 2024-12-04T15:24:11,566 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/eb54a477650d451bb8449f0ef5c42cd6 2024-12-04T15:24:11,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:11,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325911572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:11,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:11,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325911573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:11,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/3885a155af014b448b516d5ba9e842f6 is 50, key is test_row_0/C:col10/1733325849255/Put/seqid=0 2024-12-04T15:24:11,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742477_1653 (size=12001) 2024-12-04T15:24:11,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:11,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325911775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:11,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:11,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325911784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:11,991 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/3885a155af014b448b516d5ba9e842f6 2024-12-04T15:24:11,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/2598bd88ef154b49b8b86c9fca024a0a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/2598bd88ef154b49b8b86c9fca024a0a 2024-12-04T15:24:11,999 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/2598bd88ef154b49b8b86c9fca024a0a, entries=150, sequenceid=93, filesize=30.2 K 2024-12-04T15:24:12,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/eb54a477650d451bb8449f0ef5c42cd6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/eb54a477650d451bb8449f0ef5c42cd6 2024-12-04T15:24:12,005 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/eb54a477650d451bb8449f0ef5c42cd6, entries=150, sequenceid=93, filesize=11.7 K 2024-12-04T15:24:12,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/3885a155af014b448b516d5ba9e842f6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/3885a155af014b448b516d5ba9e842f6 2024-12-04T15:24:12,009 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/3885a155af014b448b516d5ba9e842f6, entries=150, sequenceid=93, filesize=11.7 K 2024-12-04T15:24:12,010 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 5345b80b290e2620248a8fde2595e371 in 1470ms, sequenceid=93, compaction requested=true 2024-12-04T15:24:12,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:12,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:12,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-04T15:24:12,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=156 2024-12-04T15:24:12,029 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-04T15:24:12,029 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0970 sec 2024-12-04T15:24:12,043 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees in 3.1350 sec 2024-12-04T15:24:12,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:12,044 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-04T15:24:12,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:12,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:12,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:12,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:12,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:12,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:12,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120445d0306f5e9b43d3b8ca5560a9199077_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325851466/Put/seqid=0 2024-12-04T15:24:12,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742478_1654 (size=14594) 2024-12-04T15:24:12,057 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:12,061 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120445d0306f5e9b43d3b8ca5560a9199077_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120445d0306f5e9b43d3b8ca5560a9199077_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:12,062 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/e1ecec2448854ff9acb4e9f6fa324fc2, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:12,063 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/e1ecec2448854ff9acb4e9f6fa324fc2 is 175, key is test_row_0/A:col10/1733325851466/Put/seqid=0 2024-12-04T15:24:12,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742479_1655 (size=39549) 2024-12-04T15:24:12,073 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/e1ecec2448854ff9acb4e9f6fa324fc2 2024-12-04T15:24:12,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:12,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325912079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:12,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:12,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325912080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:12,083 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/1d23a6152afd4c9ea17b3332e996c0b6 is 50, key is test_row_0/B:col10/1733325851466/Put/seqid=0 2024-12-04T15:24:12,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:12,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325912088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:12,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742480_1656 (size=12001) 2024-12-04T15:24:12,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/1d23a6152afd4c9ea17b3332e996c0b6 2024-12-04T15:24:12,154 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/b96687febea94d28bfaf334528b1eeee is 50, key is test_row_0/C:col10/1733325851466/Put/seqid=0 2024-12-04T15:24:12,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742481_1657 (size=12001) 2024-12-04T15:24:12,187 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/b96687febea94d28bfaf334528b1eeee 2024-12-04T15:24:12,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:12,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325912188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:12,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/e1ecec2448854ff9acb4e9f6fa324fc2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e1ecec2448854ff9acb4e9f6fa324fc2 2024-12-04T15:24:12,228 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e1ecec2448854ff9acb4e9f6fa324fc2, entries=200, sequenceid=117, filesize=38.6 K 2024-12-04T15:24:12,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/1d23a6152afd4c9ea17b3332e996c0b6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/1d23a6152afd4c9ea17b3332e996c0b6 2024-12-04T15:24:12,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/1d23a6152afd4c9ea17b3332e996c0b6, entries=150, sequenceid=117, filesize=11.7 K 2024-12-04T15:24:12,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/b96687febea94d28bfaf334528b1eeee as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/b96687febea94d28bfaf334528b1eeee 2024-12-04T15:24:12,248 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/b96687febea94d28bfaf334528b1eeee, entries=150, sequenceid=117, filesize=11.7 K 2024-12-04T15:24:12,249 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 
KB/61830 for 5345b80b290e2620248a8fde2595e371 in 205ms, sequenceid=117, compaction requested=true 2024-12-04T15:24:12,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:12,249 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:24:12,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:24:12,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:12,249 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:24:12,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:24:12,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:12,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:24:12,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:24:12,251 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132517 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:24:12,251 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/A is initiating minor compaction (all files) 2024-12-04T15:24:12,251 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/A in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:12,252 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/98223fe4747b4eebaf5e615fac66832a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/3c83100b07f44b0f8489434b07b779ba, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/2598bd88ef154b49b8b86c9fca024a0a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e1ecec2448854ff9acb4e9f6fa324fc2] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=129.4 K 2024-12-04T15:24:12,252 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:12,252 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/98223fe4747b4eebaf5e615fac66832a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/3c83100b07f44b0f8489434b07b779ba, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/2598bd88ef154b49b8b86c9fca024a0a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e1ecec2448854ff9acb4e9f6fa324fc2] 2024-12-04T15:24:12,253 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:24:12,253 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/B is initiating minor compaction (all files) 2024-12-04T15:24:12,253 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/B in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:12,253 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9e0b93f11feb4ff6a6ae35c6b4177786, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9246ae95f3334fd1abdbedd6a9c12aa0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/eb54a477650d451bb8449f0ef5c42cd6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/1d23a6152afd4c9ea17b3332e996c0b6] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=47.0 K 2024-12-04T15:24:12,253 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98223fe4747b4eebaf5e615fac66832a, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733325847018 2024-12-04T15:24:12,253 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e0b93f11feb4ff6a6ae35c6b4177786, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733325847018 2024-12-04T15:24:12,253 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c83100b07f44b0f8489434b07b779ba, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733325847762 2024-12-04T15:24:12,253 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 9246ae95f3334fd1abdbedd6a9c12aa0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733325847762 2024-12-04T15:24:12,254 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2598bd88ef154b49b8b86c9fca024a0a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733325849244 2024-12-04T15:24:12,254 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting eb54a477650d451bb8449f0ef5c42cd6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733325849244 2024-12-04T15:24:12,254 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1ecec2448854ff9acb4e9f6fa324fc2, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733325851458 2024-12-04T15:24:12,254 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d23a6152afd4c9ea17b3332e996c0b6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733325851458 2024-12-04T15:24:12,267 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#B#compaction#561 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:12,268 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/96c40afe942b461ab33313754de63cd1 is 50, key is test_row_0/B:col10/1733325851466/Put/seqid=0 2024-12-04T15:24:12,272 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:12,292 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120492ad31de334246fc8bfde8fa2dedc420_5345b80b290e2620248a8fde2595e371 store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:12,295 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120492ad31de334246fc8bfde8fa2dedc420_5345b80b290e2620248a8fde2595e371, store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:12,295 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120492ad31de334246fc8bfde8fa2dedc420_5345b80b290e2620248a8fde2595e371 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:12,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742482_1658 (size=12241) 2024-12-04T15:24:12,317 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/96c40afe942b461ab33313754de63cd1 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/96c40afe942b461ab33313754de63cd1 2024-12-04T15:24:12,326 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5345b80b290e2620248a8fde2595e371/B of 5345b80b290e2620248a8fde2595e371 into 96c40afe942b461ab33313754de63cd1(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:24:12,326 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:12,326 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/B, priority=12, startTime=1733325852249; duration=0sec 2024-12-04T15:24:12,326 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:24:12,327 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:B 2024-12-04T15:24:12,327 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:24:12,329 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:24:12,329 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/C is initiating minor compaction (all files) 2024-12-04T15:24:12,329 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/C in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:12,329 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/20afed4e64104bc9a278501b21ebb97d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/293ae11f8eef447d9ac9e5438d7ea94e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/3885a155af014b448b516d5ba9e842f6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/b96687febea94d28bfaf334528b1eeee] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=47.0 K 2024-12-04T15:24:12,329 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 20afed4e64104bc9a278501b21ebb97d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733325847018 2024-12-04T15:24:12,330 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 293ae11f8eef447d9ac9e5438d7ea94e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733325847762 2024-12-04T15:24:12,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742483_1659 (size=4469) 2024-12-04T15:24:12,330 DEBUG 
[RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3885a155af014b448b516d5ba9e842f6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733325849244 2024-12-04T15:24:12,331 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting b96687febea94d28bfaf334528b1eeee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733325851458 2024-12-04T15:24:12,331 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#A#compaction#562 average throughput is 0.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:12,332 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/bf1643f0fb284718a252d46dc8dc0e1a is 175, key is test_row_0/A:col10/1733325851466/Put/seqid=0 2024-12-04T15:24:12,350 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#C#compaction#563 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:12,351 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/806753efdd574085bc823d27f92d7e72 is 50, key is test_row_0/C:col10/1733325851466/Put/seqid=0 2024-12-04T15:24:12,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742484_1660 (size=31195) 2024-12-04T15:24:12,363 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/bf1643f0fb284718a252d46dc8dc0e1a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/bf1643f0fb284718a252d46dc8dc0e1a 2024-12-04T15:24:12,368 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5345b80b290e2620248a8fde2595e371/A of 5345b80b290e2620248a8fde2595e371 into bf1643f0fb284718a252d46dc8dc0e1a(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:24:12,368 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:12,368 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/A, priority=12, startTime=1733325852249; duration=0sec 2024-12-04T15:24:12,369 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:12,369 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:A 2024-12-04T15:24:12,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742485_1661 (size=12241) 2024-12-04T15:24:12,385 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/806753efdd574085bc823d27f92d7e72 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/806753efdd574085bc823d27f92d7e72 2024-12-04T15:24:12,389 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5345b80b290e2620248a8fde2595e371/C of 5345b80b290e2620248a8fde2595e371 into 806753efdd574085bc823d27f92d7e72(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:24:12,389 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:12,389 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/C, priority=12, startTime=1733325852250; duration=0sec 2024-12-04T15:24:12,389 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:12,389 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:C 2024-12-04T15:24:12,396 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-04T15:24:12,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:12,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:12,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:12,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:12,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:12,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:12,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:12,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412046e2145939a7b42a3ac486620374ad331_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325852395/Put/seqid=0 2024-12-04T15:24:12,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742486_1662 (size=14644) 2024-12-04T15:24:12,437 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:12,441 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412046e2145939a7b42a3ac486620374ad331_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412046e2145939a7b42a3ac486620374ad331_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:12,442 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): 
Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/e2a676a00df94308b8331ff7ca99800f, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:12,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/e2a676a00df94308b8331ff7ca99800f is 175, key is test_row_0/A:col10/1733325852395/Put/seqid=0 2024-12-04T15:24:12,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742487_1663 (size=39599) 2024-12-04T15:24:12,480 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/e2a676a00df94308b8331ff7ca99800f 2024-12-04T15:24:12,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:12,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325912488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:12,492 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/2d0cbc6eb36448118ceebf72a26ef622 is 50, key is test_row_0/B:col10/1733325852395/Put/seqid=0 2024-12-04T15:24:12,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742488_1664 (size=12051) 2024-12-04T15:24:12,518 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/2d0cbc6eb36448118ceebf72a26ef622 2024-12-04T15:24:12,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/464c1fa18efe4e6294fa6f3b78b6117a is 50, key is test_row_0/C:col10/1733325852395/Put/seqid=0 2024-12-04T15:24:12,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742489_1665 (size=12051) 2024-12-04T15:24:12,542 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/464c1fa18efe4e6294fa6f3b78b6117a 2024-12-04T15:24:12,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/e2a676a00df94308b8331ff7ca99800f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e2a676a00df94308b8331ff7ca99800f 2024-12-04T15:24:12,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e2a676a00df94308b8331ff7ca99800f, entries=200, sequenceid=133, filesize=38.7 K 
2024-12-04T15:24:12,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/2d0cbc6eb36448118ceebf72a26ef622 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/2d0cbc6eb36448118ceebf72a26ef622 2024-12-04T15:24:12,579 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/2d0cbc6eb36448118ceebf72a26ef622, entries=150, sequenceid=133, filesize=11.8 K 2024-12-04T15:24:12,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/464c1fa18efe4e6294fa6f3b78b6117a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/464c1fa18efe4e6294fa6f3b78b6117a 2024-12-04T15:24:12,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:12,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325912584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:12,590 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/464c1fa18efe4e6294fa6f3b78b6117a, entries=150, sequenceid=133, filesize=11.8 K 2024-12-04T15:24:12,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 5345b80b290e2620248a8fde2595e371 in 194ms, sequenceid=133, compaction requested=false 2024-12-04T15:24:12,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:12,596 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-04T15:24:12,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:12,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:12,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:12,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:12,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:12,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:12,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:12,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:12,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325912614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:12,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:12,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325912615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:12,623 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412048451c064d88f4351843680e256758efb_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325852485/Put/seqid=0 2024-12-04T15:24:12,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742490_1666 (size=12304) 2024-12-04T15:24:12,661 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:12,676 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412048451c064d88f4351843680e256758efb_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412048451c064d88f4351843680e256758efb_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:12,680 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/9981e058f1b04e968b2dcafcfb5277fc, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:12,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/9981e058f1b04e968b2dcafcfb5277fc is 175, key is test_row_0/A:col10/1733325852485/Put/seqid=0 2024-12-04T15:24:12,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742491_1667 (size=31105) 2024-12-04T15:24:12,719 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=47.0 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/9981e058f1b04e968b2dcafcfb5277fc 2024-12-04T15:24:12,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:12,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325912719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:12,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:12,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325912721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:12,738 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/cf27f4265656466cb9080f9f666c01d0 is 50, key is test_row_0/B:col10/1733325852485/Put/seqid=0 2024-12-04T15:24:12,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742492_1668 (size=12151) 2024-12-04T15:24:12,782 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/cf27f4265656466cb9080f9f666c01d0 2024-12-04T15:24:12,810 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/876dd432bf884fa18931cb994c511468 is 50, key is test_row_0/C:col10/1733325852485/Put/seqid=0 2024-12-04T15:24:12,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742493_1669 (size=12151) 2024-12-04T15:24:12,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:12,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325912928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:12,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:12,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325912930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:13,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-04T15:24:13,027 INFO [Thread-2765 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-12-04T15:24:13,030 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:24:13,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-12-04T15:24:13,032 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:24:13,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-04T15:24:13,032 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:24:13,032 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:24:13,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:13,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1733325913066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:13,068 DEBUG [Thread-2755 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8200 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., hostname=645c2dbfef2e,42169,1733325683856, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:24:13,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:13,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40320 deadline: 1733325913072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:13,075 DEBUG [Thread-2757 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8206 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., hostname=645c2dbfef2e,42169,1733325683856, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:24:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-04T15:24:13,184 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:13,185 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-04T15:24:13,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:13,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:13,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:13,185 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:13,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:13,224 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/876dd432bf884fa18931cb994c511468 2024-12-04T15:24:13,234 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/9981e058f1b04e968b2dcafcfb5277fc as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/9981e058f1b04e968b2dcafcfb5277fc 2024-12-04T15:24:13,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:13,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325913235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:13,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:13,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325913236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:13,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/9981e058f1b04e968b2dcafcfb5277fc, entries=150, sequenceid=157, filesize=30.4 K 2024-12-04T15:24:13,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/cf27f4265656466cb9080f9f666c01d0 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/cf27f4265656466cb9080f9f666c01d0 2024-12-04T15:24:13,246 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/cf27f4265656466cb9080f9f666c01d0, entries=150, sequenceid=157, filesize=11.9 K 2024-12-04T15:24:13,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/876dd432bf884fa18931cb994c511468 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/876dd432bf884fa18931cb994c511468 2024-12-04T15:24:13,250 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/876dd432bf884fa18931cb994c511468, entries=150, sequenceid=157, filesize=11.9 K 2024-12-04T15:24:13,251 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 5345b80b290e2620248a8fde2595e371 in 656ms, sequenceid=157, compaction requested=true 2024-12-04T15:24:13,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:13,252 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting 
compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:24:13,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:24:13,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:13,252 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:24:13,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:24:13,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:13,253 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101899 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:24:13,253 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/A is initiating minor compaction (all files) 2024-12-04T15:24:13,253 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/A in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:13,253 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/bf1643f0fb284718a252d46dc8dc0e1a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e2a676a00df94308b8331ff7ca99800f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/9981e058f1b04e968b2dcafcfb5277fc] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=99.5 K 2024-12-04T15:24:13,253 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:13,253 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/bf1643f0fb284718a252d46dc8dc0e1a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e2a676a00df94308b8331ff7ca99800f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/9981e058f1b04e968b2dcafcfb5277fc] 2024-12-04T15:24:13,253 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:24:13,253 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/B is initiating minor compaction (all files) 2024-12-04T15:24:13,253 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/B in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:13,253 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/96c40afe942b461ab33313754de63cd1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/2d0cbc6eb36448118ceebf72a26ef622, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/cf27f4265656466cb9080f9f666c01d0] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=35.6 K 2024-12-04T15:24:13,253 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf1643f0fb284718a252d46dc8dc0e1a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733325851458 2024-12-04T15:24:13,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:24:13,254 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 96c40afe942b461ab33313754de63cd1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733325851458 2024-12-04T15:24:13,254 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e2a676a00df94308b8331ff7ca99800f, keycount=200, bloomtype=ROW, size=38.7 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733325852071 2024-12-04T15:24:13,254 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d0cbc6eb36448118ceebf72a26ef622, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733325852075 2024-12-04T15:24:13,254 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting cf27f4265656466cb9080f9f666c01d0, keycount=150, bloomtype=ROW, 
size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733325852482 2024-12-04T15:24:13,255 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9981e058f1b04e968b2dcafcfb5277fc, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733325852482 2024-12-04T15:24:13,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:24:13,264 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#B#compaction#570 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:13,265 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/0d3fd99219a34253a759d7fa64316faa is 50, key is test_row_0/B:col10/1733325852485/Put/seqid=0 2024-12-04T15:24:13,276 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:13,286 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412043cb3e15f17bf48248172e4422edb7438_5345b80b290e2620248a8fde2595e371 store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:13,289 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412043cb3e15f17bf48248172e4422edb7438_5345b80b290e2620248a8fde2595e371, store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:13,289 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412043cb3e15f17bf48248172e4422edb7438_5345b80b290e2620248a8fde2595e371 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:13,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742494_1670 (size=12493) 2024-12-04T15:24:13,318 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/0d3fd99219a34253a759d7fa64316faa as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0d3fd99219a34253a759d7fa64316faa 2024-12-04T15:24:13,323 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 
5345b80b290e2620248a8fde2595e371/B of 5345b80b290e2620248a8fde2595e371 into 0d3fd99219a34253a759d7fa64316faa(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:24:13,323 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:13,323 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/B, priority=13, startTime=1733325853252; duration=0sec 2024-12-04T15:24:13,323 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:24:13,323 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:B 2024-12-04T15:24:13,323 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:24:13,324 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:24:13,325 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/C is initiating minor compaction (all files) 2024-12-04T15:24:13,325 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/C in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:13,325 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/806753efdd574085bc823d27f92d7e72, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/464c1fa18efe4e6294fa6f3b78b6117a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/876dd432bf884fa18931cb994c511468] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=35.6 K 2024-12-04T15:24:13,325 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 806753efdd574085bc823d27f92d7e72, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733325851458 2024-12-04T15:24:13,326 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 464c1fa18efe4e6294fa6f3b78b6117a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733325852075 2024-12-04T15:24:13,326 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 876dd432bf884fa18931cb994c511468, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733325852482 2024-12-04T15:24:13,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-04T15:24:13,337 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:13,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-04T15:24:13,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:13,338 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-04T15:24:13,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:13,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:13,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:13,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:13,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:13,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:13,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742495_1671 (size=4469) 2024-12-04T15:24:13,357 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#A#compaction#571 average throughput is 0.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:13,357 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/ec223e494cb94fb3a27876ba242a5430 is 175, key is test_row_0/A:col10/1733325852485/Put/seqid=0 2024-12-04T15:24:13,360 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#C#compaction#572 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:13,361 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/8e981be9d1e14bb3b14aaed8771624e5 is 50, key is test_row_0/C:col10/1733325852485/Put/seqid=0 2024-12-04T15:24:13,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204718c9c0a5d08414d8fc83082f36f7c98_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325852601/Put/seqid=0 2024-12-04T15:24:13,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742496_1672 (size=31447) 2024-12-04T15:24:13,409 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/ec223e494cb94fb3a27876ba242a5430 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/ec223e494cb94fb3a27876ba242a5430 2024-12-04T15:24:13,417 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5345b80b290e2620248a8fde2595e371/A of 5345b80b290e2620248a8fde2595e371 into ec223e494cb94fb3a27876ba242a5430(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:24:13,417 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:13,417 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/A, priority=13, startTime=1733325853251; duration=0sec 2024-12-04T15:24:13,417 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:13,417 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:A 2024-12-04T15:24:13,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742497_1673 (size=12493) 2024-12-04T15:24:13,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742498_1674 (size=12304) 2024-12-04T15:24:13,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:13,424 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/8e981be9d1e14bb3b14aaed8771624e5 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/8e981be9d1e14bb3b14aaed8771624e5 2024-12-04T15:24:13,426 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204718c9c0a5d08414d8fc83082f36f7c98_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204718c9c0a5d08414d8fc83082f36f7c98_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:13,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/fd6417d08f3b4f72b1777f1d68620080, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:13,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/fd6417d08f3b4f72b1777f1d68620080 is 175, key is test_row_0/A:col10/1733325852601/Put/seqid=0 
2024-12-04T15:24:13,430 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5345b80b290e2620248a8fde2595e371/C of 5345b80b290e2620248a8fde2595e371 into 8e981be9d1e14bb3b14aaed8771624e5(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:24:13,430 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:13,431 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/C, priority=13, startTime=1733325853252; duration=0sec 2024-12-04T15:24:13,431 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:13,431 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:C 2024-12-04T15:24:13,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742499_1675 (size=31105) 2024-12-04T15:24:13,436 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=170, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/fd6417d08f3b4f72b1777f1d68620080 2024-12-04T15:24:13,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/964ce5f8d93a42e8bd966722267d67dc is 50, key is test_row_0/B:col10/1733325852601/Put/seqid=0 2024-12-04T15:24:13,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742500_1676 (size=12151) 2024-12-04T15:24:13,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:13,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:13,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-04T15:24:13,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:13,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325913800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:13,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:13,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325913814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:13,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:13,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325913815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:13,883 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/964ce5f8d93a42e8bd966722267d67dc 2024-12-04T15:24:13,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:13,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325913908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:13,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/74dc3fced93245d2897f11a12f85ce0c is 50, key is test_row_0/C:col10/1733325852601/Put/seqid=0 2024-12-04T15:24:13,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:13,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325913917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:13,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:13,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325913918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:13,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742501_1677 (size=12151) 2024-12-04T15:24:13,973 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/74dc3fced93245d2897f11a12f85ce0c 2024-12-04T15:24:13,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/fd6417d08f3b4f72b1777f1d68620080 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/fd6417d08f3b4f72b1777f1d68620080 2024-12-04T15:24:13,982 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/fd6417d08f3b4f72b1777f1d68620080, entries=150, sequenceid=170, filesize=30.4 K 2024-12-04T15:24:13,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/964ce5f8d93a42e8bd966722267d67dc as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/964ce5f8d93a42e8bd966722267d67dc 2024-12-04T15:24:14,007 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/964ce5f8d93a42e8bd966722267d67dc, entries=150, sequenceid=170, filesize=11.9 K 2024-12-04T15:24:14,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/74dc3fced93245d2897f11a12f85ce0c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/74dc3fced93245d2897f11a12f85ce0c 2024-12-04T15:24:14,022 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/74dc3fced93245d2897f11a12f85ce0c, entries=150, sequenceid=170, filesize=11.9 K 2024-12-04T15:24:14,023 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 5345b80b290e2620248a8fde2595e371 in 685ms, sequenceid=170, compaction requested=false 2024-12-04T15:24:14,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:14,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:14,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-12-04T15:24:14,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-12-04T15:24:14,026 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-12-04T15:24:14,026 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 992 msec 2024-12-04T15:24:14,027 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 996 msec 2024-12-04T15:24:14,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:14,121 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-04T15:24:14,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:14,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:14,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:14,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:14,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:14,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:14,127 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204624cbb5f0cb24dc3861e6005cba8f2e0_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325853794/Put/seqid=0 2024-12-04T15:24:14,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:14,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325914128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:14,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:14,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325914128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:14,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:14,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325914129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:14,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742502_1678 (size=14794) 2024-12-04T15:24:14,133 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:14,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-04T15:24:14,136 INFO [Thread-2765 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-12-04T15:24:14,136 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204624cbb5f0cb24dc3861e6005cba8f2e0_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204624cbb5f0cb24dc3861e6005cba8f2e0_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:14,140 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/e5a00f56f2274ef7b6b8a3649adebe88, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:14,141 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:24:14,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/e5a00f56f2274ef7b6b8a3649adebe88 is 175, key is 
test_row_0/A:col10/1733325853794/Put/seqid=0 2024-12-04T15:24:14,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-12-04T15:24:14,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-04T15:24:14,145 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:24:14,145 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:24:14,145 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:24:14,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742503_1679 (size=39749) 2024-12-04T15:24:14,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:14,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325914233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:14,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:14,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325914233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:14,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-04T15:24:14,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:14,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325914244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:14,300 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:14,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-04T15:24:14,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:14,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:14,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:14,306 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:14,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:14,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:14,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:14,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325914440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:14,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:14,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325914440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:14,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-04T15:24:14,464 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:14,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:14,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325914460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:14,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-04T15:24:14,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:14,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:14,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:14,465 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:14,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:14,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:14,583 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=199, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/e5a00f56f2274ef7b6b8a3649adebe88 2024-12-04T15:24:14,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/0ec81ff53ec04e6aab15c987ebd5b711 is 50, key is test_row_0/B:col10/1733325853794/Put/seqid=0 2024-12-04T15:24:14,624 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:14,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-04T15:24:14,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:14,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:14,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:14,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:14,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:14,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:14,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742504_1680 (size=12151) 2024-12-04T15:24:14,653 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/0ec81ff53ec04e6aab15c987ebd5b711 2024-12-04T15:24:14,682 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/4e6a7961156a45a7a081a39121cc6e02 is 50, key is test_row_0/C:col10/1733325853794/Put/seqid=0 2024-12-04T15:24:14,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742505_1681 (size=12151) 2024-12-04T15:24:14,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:14,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325914751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:14,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:14,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325914752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:14,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:14,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325914769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:14,781 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:14,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-04T15:24:14,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:14,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:14,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:14,783 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:14,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:14,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-04T15:24:14,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:14,943 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:14,944 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-04T15:24:14,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:14,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:14,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:14,944 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:14,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:14,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:15,104 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:15,104 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-04T15:24:15,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:15,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:15,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:15,105 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:15,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:15,108 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/4e6a7961156a45a7a081a39121cc6e02 2024-12-04T15:24:15,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:15,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/e5a00f56f2274ef7b6b8a3649adebe88 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e5a00f56f2274ef7b6b8a3649adebe88 2024-12-04T15:24:15,158 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e5a00f56f2274ef7b6b8a3649adebe88, entries=200, sequenceid=199, filesize=38.8 K 2024-12-04T15:24:15,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/0ec81ff53ec04e6aab15c987ebd5b711 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0ec81ff53ec04e6aab15c987ebd5b711 2024-12-04T15:24:15,165 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0ec81ff53ec04e6aab15c987ebd5b711, entries=150, sequenceid=199, filesize=11.9 K 2024-12-04T15:24:15,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/4e6a7961156a45a7a081a39121cc6e02 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/4e6a7961156a45a7a081a39121cc6e02 2024-12-04T15:24:15,169 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/4e6a7961156a45a7a081a39121cc6e02, entries=150, sequenceid=199, filesize=11.9 K 2024-12-04T15:24:15,172 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 5345b80b290e2620248a8fde2595e371 in 1051ms, sequenceid=199, compaction requested=true 2024-12-04T15:24:15,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:15,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:24:15,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:24:15,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:24:15,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): 
Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-04T15:24:15,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:24:15,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-04T15:24:15,173 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:24:15,174 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102301 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:24:15,174 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/A is initiating minor compaction (all files) 2024-12-04T15:24:15,174 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/A in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:15,174 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/ec223e494cb94fb3a27876ba242a5430, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/fd6417d08f3b4f72b1777f1d68620080, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e5a00f56f2274ef7b6b8a3649adebe88] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=99.9 K 2024-12-04T15:24:15,174 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:15,174 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/ec223e494cb94fb3a27876ba242a5430, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/fd6417d08f3b4f72b1777f1d68620080, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e5a00f56f2274ef7b6b8a3649adebe88] 2024-12-04T15:24:15,175 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec223e494cb94fb3a27876ba242a5430, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733325852482 2024-12-04T15:24:15,176 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:24:15,176 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd6417d08f3b4f72b1777f1d68620080, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733325852601 2024-12-04T15:24:15,176 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5a00f56f2274ef7b6b8a3649adebe88, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733325853788 2024-12-04T15:24:15,178 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:24:15,178 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/C is initiating minor compaction (all files) 2024-12-04T15:24:15,178 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/C in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:15,179 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/8e981be9d1e14bb3b14aaed8771624e5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/74dc3fced93245d2897f11a12f85ce0c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/4e6a7961156a45a7a081a39121cc6e02] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=35.9 K 2024-12-04T15:24:15,179 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e981be9d1e14bb3b14aaed8771624e5, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733325852482 2024-12-04T15:24:15,180 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 74dc3fced93245d2897f11a12f85ce0c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733325852601 2024-12-04T15:24:15,180 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e6a7961156a45a7a081a39121cc6e02, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733325853788 2024-12-04T15:24:15,186 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:15,188 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120498d38a0dfb9346448f2a4f608bac9914_5345b80b290e2620248a8fde2595e371 store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:15,189 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#C#compaction#580 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:15,189 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/62b3aa7d12bf4d699058646a3972a117 is 50, key is test_row_0/C:col10/1733325853794/Put/seqid=0 2024-12-04T15:24:15,190 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120498d38a0dfb9346448f2a4f608bac9914_5345b80b290e2620248a8fde2595e371, store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:15,190 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120498d38a0dfb9346448f2a4f608bac9914_5345b80b290e2620248a8fde2595e371 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:15,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742507_1683 (size=4469) 2024-12-04T15:24:15,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742506_1682 (size=12595) 2024-12-04T15:24:15,210 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/62b3aa7d12bf4d699058646a3972a117 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/62b3aa7d12bf4d699058646a3972a117 2024-12-04T15:24:15,215 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5345b80b290e2620248a8fde2595e371/C of 5345b80b290e2620248a8fde2595e371 into 62b3aa7d12bf4d699058646a3972a117(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:24:15,215 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:15,215 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/C, priority=13, startTime=1733325855172; duration=0sec 2024-12-04T15:24:15,215 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:24:15,215 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:C 2024-12-04T15:24:15,216 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:24:15,217 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:24:15,217 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/B is initiating minor compaction (all files) 2024-12-04T15:24:15,217 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/B in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:15,217 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0d3fd99219a34253a759d7fa64316faa, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/964ce5f8d93a42e8bd966722267d67dc, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0ec81ff53ec04e6aab15c987ebd5b711] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=35.9 K 2024-12-04T15:24:15,218 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d3fd99219a34253a759d7fa64316faa, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733325852482 2024-12-04T15:24:15,219 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 964ce5f8d93a42e8bd966722267d67dc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733325852601 2024-12-04T15:24:15,219 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ec81ff53ec04e6aab15c987ebd5b711, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733325853788 2024-12-04T15:24:15,233 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
5345b80b290e2620248a8fde2595e371#B#compaction#581 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:15,233 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/eb6abe3cf6004bcba1f1376ca42eb26e is 50, key is test_row_0/B:col10/1733325853794/Put/seqid=0 2024-12-04T15:24:15,263 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:15,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:15,268 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-04T15:24:15,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-04T15:24:15,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:15,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:15,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:15,269 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:15,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:15,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:15,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:15,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:15,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:15,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:15,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:15,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:15,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742508_1684 (size=12595) 2024-12-04T15:24:15,286 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412042e842442b375492bb9dc185fd9d24069_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325854123/Put/seqid=0 2024-12-04T15:24:15,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-04T15:24:15,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742509_1685 (size=12304) 2024-12-04T15:24:15,302 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:15,304 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/eb6abe3cf6004bcba1f1376ca42eb26e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/eb6abe3cf6004bcba1f1376ca42eb26e 2024-12-04T15:24:15,306 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412042e842442b375492bb9dc185fd9d24069_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412042e842442b375492bb9dc185fd9d24069_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:15,307 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/f83d5a59a0fd4479851238d9d8b71db7, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:15,307 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/f83d5a59a0fd4479851238d9d8b71db7 is 175, key is test_row_0/A:col10/1733325854123/Put/seqid=0 2024-12-04T15:24:15,319 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5345b80b290e2620248a8fde2595e371/B of 5345b80b290e2620248a8fde2595e371 into eb6abe3cf6004bcba1f1376ca42eb26e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:24:15,319 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:15,320 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/B, priority=13, startTime=1733325855172; duration=0sec 2024-12-04T15:24:15,320 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:15,320 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:B 2024-12-04T15:24:15,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742510_1686 (size=31105) 2024-12-04T15:24:15,322 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=212, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/f83d5a59a0fd4479851238d9d8b71db7 2024-12-04T15:24:15,332 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/f930758bfb194482bb9356316a7de0c4 is 50, key is test_row_0/B:col10/1733325854123/Put/seqid=0 2024-12-04T15:24:15,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742511_1687 (size=12151) 2024-12-04T15:24:15,338 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/f930758bfb194482bb9356316a7de0c4 2024-12-04T15:24:15,347 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/72b77bc8dd034980b2cfd0b41d0d405a is 50, key is test_row_0/C:col10/1733325854123/Put/seqid=0 2024-12-04T15:24:15,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:15,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325915346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:15,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:15,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325915348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:15,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:15,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325915351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:15,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742512_1688 (size=12151) 2024-12-04T15:24:15,362 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/72b77bc8dd034980b2cfd0b41d0d405a 2024-12-04T15:24:15,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/f83d5a59a0fd4479851238d9d8b71db7 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/f83d5a59a0fd4479851238d9d8b71db7 2024-12-04T15:24:15,372 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/f83d5a59a0fd4479851238d9d8b71db7, entries=150, sequenceid=212, filesize=30.4 K 2024-12-04T15:24:15,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/f930758bfb194482bb9356316a7de0c4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/f930758bfb194482bb9356316a7de0c4 2024-12-04T15:24:15,377 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/f930758bfb194482bb9356316a7de0c4, entries=150, sequenceid=212, filesize=11.9 K 2024-12-04T15:24:15,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/72b77bc8dd034980b2cfd0b41d0d405a as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/72b77bc8dd034980b2cfd0b41d0d405a 2024-12-04T15:24:15,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/72b77bc8dd034980b2cfd0b41d0d405a, entries=150, sequenceid=212, filesize=11.9 K 2024-12-04T15:24:15,383 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 5345b80b290e2620248a8fde2595e371 in 115ms, sequenceid=212, compaction requested=false 2024-12-04T15:24:15,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:15,420 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:15,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-04T15:24:15,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:15,421 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-04T15:24:15,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:15,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:15,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:15,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:15,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:15,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:15,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204f872128c7d184513b23323ee00795f4c_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325855349/Put/seqid=0 
2024-12-04T15:24:15,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742513_1689 (size=12304) 2024-12-04T15:24:15,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:15,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:15,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:15,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325915464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:15,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:15,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325915466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:15,470 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:15,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325915467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:15,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325915568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:15,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325915569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:15,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:15,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325915571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:15,598 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#A#compaction#579 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:15,599 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/60977262f11243eb8133ab1278a62c1e is 175, key is test_row_0/A:col10/1733325853794/Put/seqid=0 2024-12-04T15:24:15,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742514_1690 (size=31549) 2024-12-04T15:24:15,608 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/60977262f11243eb8133ab1278a62c1e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/60977262f11243eb8133ab1278a62c1e 2024-12-04T15:24:15,615 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5345b80b290e2620248a8fde2595e371/A of 5345b80b290e2620248a8fde2595e371 into 60977262f11243eb8133ab1278a62c1e(size=30.8 K), total size for store is 61.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:24:15,615 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:15,615 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/A, priority=13, startTime=1733325855172; duration=0sec 2024-12-04T15:24:15,615 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:15,615 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:A 2024-12-04T15:24:15,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:15,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325915772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:15,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:15,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325915772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:15,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:15,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325915776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:15,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:15,838 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204f872128c7d184513b23323ee00795f4c_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204f872128c7d184513b23323ee00795f4c_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:15,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/976a51e368a94fbda218de488252d2be, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:15,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/976a51e368a94fbda218de488252d2be is 175, key is test_row_0/A:col10/1733325855349/Put/seqid=0 2024-12-04T15:24:15,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742515_1691 (size=31105) 2024-12-04T15:24:15,873 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=237, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/976a51e368a94fbda218de488252d2be 2024-12-04T15:24:15,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/9c590a1f54f8469389e475a007a68502 is 50, key is test_row_0/B:col10/1733325855349/Put/seqid=0 2024-12-04T15:24:15,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742516_1692 (size=12151) 2024-12-04T15:24:16,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:16,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325916080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:16,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:16,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325916084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:16,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:16,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325916084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:16,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-04T15:24:16,328 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/9c590a1f54f8469389e475a007a68502 2024-12-04T15:24:16,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/365726a1884c4cfaa2030ad7e2cbbef0 is 50, key is test_row_0/C:col10/1733325855349/Put/seqid=0 2024-12-04T15:24:16,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742517_1693 (size=12151) 2024-12-04T15:24:16,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:16,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325916585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:16,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:16,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325916597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:16,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:16,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325916600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:16,840 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/365726a1884c4cfaa2030ad7e2cbbef0 2024-12-04T15:24:16,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/976a51e368a94fbda218de488252d2be as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/976a51e368a94fbda218de488252d2be 2024-12-04T15:24:16,933 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/976a51e368a94fbda218de488252d2be, entries=150, sequenceid=237, filesize=30.4 K 2024-12-04T15:24:16,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/9c590a1f54f8469389e475a007a68502 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9c590a1f54f8469389e475a007a68502 2024-12-04T15:24:16,988 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9c590a1f54f8469389e475a007a68502, entries=150, sequenceid=237, filesize=11.9 K 2024-12-04T15:24:16,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/365726a1884c4cfaa2030ad7e2cbbef0 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/365726a1884c4cfaa2030ad7e2cbbef0 2024-12-04T15:24:17,009 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/365726a1884c4cfaa2030ad7e2cbbef0, entries=150, sequenceid=237, filesize=11.9 K 2024-12-04T15:24:17,010 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 5345b80b290e2620248a8fde2595e371 in 1589ms, sequenceid=237, compaction requested=true 2024-12-04T15:24:17,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:17,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
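The repeated RegionTooBusyException warnings above record client Mutate calls being rejected while region 5345b80b290e2620248a8fde2595e371's memstore sits over its 512 K blocking limit; the flush that finishes here (pid=160, sequenceid=237) is what drains it so writes can proceed. A minimal client-side sketch of backing off and retrying such a rejected Put is shown below. The table, row key, and column names are taken from the log; the retry loop, payload, and backoff limits are illustrative assumptions, not the test's actual writer code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family, and qualifier mirror the log above; the value is a placeholder.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // Memstore is over its blocking limit (as in the WARN entries above);
          // wait for the in-flight flush to drain it, then retry with backoff.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}
```

In practice the HBase client's retrying caller already performs this kind of backoff internally and may surface the failure wrapped in a retries-exhausted exception rather than as a bare RegionTooBusyException; the explicit catch above only makes the behavior visible for illustration.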
2024-12-04T15:24:17,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-12-04T15:24:17,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-12-04T15:24:17,013 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-04T15:24:17,013 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8660 sec 2024-12-04T15:24:17,014 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 2.8720 sec 2024-12-04T15:24:17,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:17,607 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-04T15:24:17,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:17,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:17,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:17,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:17,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:17,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:17,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204dcb4f6e201e9437bafb12f7b875e1c2e_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325855466/Put/seqid=0 2024-12-04T15:24:17,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:17,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325917661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:17,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742518_1694 (size=14794) 2024-12-04T15:24:17,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:17,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325917663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:17,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:17,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325917665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:17,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:17,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325917772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:17,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:17,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325917772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:17,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:17,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325917772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:17,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:17,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325917984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:17,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:17,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325917985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:17,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:17,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325917985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:18,079 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:18,104 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204dcb4f6e201e9437bafb12f7b875e1c2e_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204dcb4f6e201e9437bafb12f7b875e1c2e_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:18,112 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/186e46218e814fdf8446eab8559dfbef, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:18,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/186e46218e814fdf8446eab8559dfbef is 175, key is test_row_0/A:col10/1733325855466/Put/seqid=0 2024-12-04T15:24:18,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742519_1695 (size=39749) 2024-12-04T15:24:18,196 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=251, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/186e46218e814fdf8446eab8559dfbef 2024-12-04T15:24:18,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/0f9f56f16221463a906491ffce1e32b9 is 50, key is test_row_0/B:col10/1733325855466/Put/seqid=0 2024-12-04T15:24:18,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742520_1696 
(size=12151) 2024-12-04T15:24:18,286 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/0f9f56f16221463a906491ffce1e32b9 2024-12-04T15:24:18,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/1ced1b23f66e467a8ef969343ec0b4b6 is 50, key is test_row_0/C:col10/1733325855466/Put/seqid=0 2024-12-04T15:24:18,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:18,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325918291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:18,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:18,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325918292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:18,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-04T15:24:18,297 INFO [Thread-2765 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-12-04T15:24:18,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:18,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325918296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:18,299 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:24:18,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-12-04T15:24:18,300 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:24:18,301 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:24:18,301 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:24:18,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-04T15:24:18,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742521_1697 (size=12151) 2024-12-04T15:24:18,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-04T15:24:18,453 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:18,453 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-04T15:24:18,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:18,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
as already flushing 2024-12-04T15:24:18,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:18,454 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:18,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:18,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:18,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-04T15:24:18,606 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:18,606 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-04T15:24:18,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:18,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:18,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:18,607 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:18,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:18,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:18,720 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/1ced1b23f66e467a8ef969343ec0b4b6 2024-12-04T15:24:18,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/186e46218e814fdf8446eab8559dfbef as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/186e46218e814fdf8446eab8559dfbef 2024-12-04T15:24:18,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/186e46218e814fdf8446eab8559dfbef, entries=200, sequenceid=251, filesize=38.8 K 2024-12-04T15:24:18,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/0f9f56f16221463a906491ffce1e32b9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0f9f56f16221463a906491ffce1e32b9 2024-12-04T15:24:18,740 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0f9f56f16221463a906491ffce1e32b9, entries=150, 
sequenceid=251, filesize=11.9 K 2024-12-04T15:24:18,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/1ced1b23f66e467a8ef969343ec0b4b6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/1ced1b23f66e467a8ef969343ec0b4b6 2024-12-04T15:24:18,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/1ced1b23f66e467a8ef969343ec0b4b6, entries=150, sequenceid=251, filesize=11.9 K 2024-12-04T15:24:18,747 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 5345b80b290e2620248a8fde2595e371 in 1140ms, sequenceid=251, compaction requested=true 2024-12-04T15:24:18,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:18,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:24:18,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:18,747 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:24:18,747 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:24:18,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:24:18,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:18,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:24:18,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:24:18,748 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:24:18,748 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/B is initiating minor compaction (all files) 2024-12-04T15:24:18,749 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/B in 
TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:18,749 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/eb6abe3cf6004bcba1f1376ca42eb26e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/f930758bfb194482bb9356316a7de0c4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9c590a1f54f8469389e475a007a68502, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0f9f56f16221463a906491ffce1e32b9] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=47.9 K 2024-12-04T15:24:18,749 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133508 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:24:18,749 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/A is initiating minor compaction (all files) 2024-12-04T15:24:18,749 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/A in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:18,749 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/60977262f11243eb8133ab1278a62c1e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/f83d5a59a0fd4479851238d9d8b71db7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/976a51e368a94fbda218de488252d2be, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/186e46218e814fdf8446eab8559dfbef] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=130.4 K 2024-12-04T15:24:18,749 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:18,749 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/60977262f11243eb8133ab1278a62c1e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/f83d5a59a0fd4479851238d9d8b71db7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/976a51e368a94fbda218de488252d2be, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/186e46218e814fdf8446eab8559dfbef] 2024-12-04T15:24:18,750 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting eb6abe3cf6004bcba1f1376ca42eb26e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733325853788 2024-12-04T15:24:18,750 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60977262f11243eb8133ab1278a62c1e, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733325853788 2024-12-04T15:24:18,752 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting f930758bfb194482bb9356316a7de0c4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733325854123 2024-12-04T15:24:18,752 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f83d5a59a0fd4479851238d9d8b71db7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733325854123 2024-12-04T15:24:18,757 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c590a1f54f8469389e475a007a68502, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733325855339 2024-12-04T15:24:18,757 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 976a51e368a94fbda218de488252d2be, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733325855339 2024-12-04T15:24:18,757 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f9f56f16221463a906491ffce1e32b9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733325855463 2024-12-04T15:24:18,758 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 186e46218e814fdf8446eab8559dfbef, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733325855454 2024-12-04T15:24:18,759 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:18,759 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-04T15:24:18,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:18,760 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-04T15:24:18,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:18,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:18,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:18,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:18,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:18,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:18,774 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#B#compaction#591 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:18,775 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/15f0bd05e51b42e2a28e430e2705d476 is 50, key is test_row_0/B:col10/1733325855466/Put/seqid=0 2024-12-04T15:24:18,777 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:18,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204445bfd43f7bf404c837e15d3f81e19fa_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325857662/Put/seqid=0 2024-12-04T15:24:18,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
as already flushing 2024-12-04T15:24:18,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:18,814 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120424eced09cfd94fc29d142a5bd27a3ffa_5345b80b290e2620248a8fde2595e371 store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:18,816 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120424eced09cfd94fc29d142a5bd27a3ffa_5345b80b290e2620248a8fde2595e371, store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:18,816 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120424eced09cfd94fc29d142a5bd27a3ffa_5345b80b290e2620248a8fde2595e371 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:18,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742522_1698 (size=12731) 2024-12-04T15:24:18,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:18,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325918818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:18,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:18,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325918818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:18,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:18,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325918819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:18,826 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/15f0bd05e51b42e2a28e430e2705d476 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/15f0bd05e51b42e2a28e430e2705d476 2024-12-04T15:24:18,832 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5345b80b290e2620248a8fde2595e371/B of 5345b80b290e2620248a8fde2595e371 into 15f0bd05e51b42e2a28e430e2705d476(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:24:18,832 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:18,832 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/B, priority=12, startTime=1733325858747; duration=0sec 2024-12-04T15:24:18,832 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:24:18,832 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:B 2024-12-04T15:24:18,832 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:24:18,834 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:24:18,834 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/C is initiating minor compaction (all files) 2024-12-04T15:24:18,834 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/C in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:18,834 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/62b3aa7d12bf4d699058646a3972a117, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/72b77bc8dd034980b2cfd0b41d0d405a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/365726a1884c4cfaa2030ad7e2cbbef0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/1ced1b23f66e467a8ef969343ec0b4b6] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=47.9 K 2024-12-04T15:24:18,834 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 62b3aa7d12bf4d699058646a3972a117, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733325853788 2024-12-04T15:24:18,834 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 72b77bc8dd034980b2cfd0b41d0d405a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733325854123 2024-12-04T15:24:18,835 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 365726a1884c4cfaa2030ad7e2cbbef0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=237, earliestPutTs=1733325855339 2024-12-04T15:24:18,835 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ced1b23f66e467a8ef969343ec0b4b6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733325855463 2024-12-04T15:24:18,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742523_1699 (size=12454) 2024-12-04T15:24:18,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:18,841 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204445bfd43f7bf404c837e15d3f81e19fa_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204445bfd43f7bf404c837e15d3f81e19fa_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:18,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/410f09271e894e5a86a8aa859d87bf71, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:18,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/410f09271e894e5a86a8aa859d87bf71 is 175, key is test_row_0/A:col10/1733325857662/Put/seqid=0 2024-12-04T15:24:18,854 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#C#compaction#594 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:18,855 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/e178d8169cb648bda8b3fd47093eae45 is 50, key is test_row_0/C:col10/1733325855466/Put/seqid=0 2024-12-04T15:24:18,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742524_1700 (size=4469) 2024-12-04T15:24:18,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742525_1701 (size=31255) 2024-12-04T15:24:18,869 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=274, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/410f09271e894e5a86a8aa859d87bf71 2024-12-04T15:24:18,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742526_1702 (size=12731) 2024-12-04T15:24:18,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/dfc794dc0d084a7e8d5b75ea1c169b7c is 50, key is test_row_0/B:col10/1733325857662/Put/seqid=0 2024-12-04T15:24:18,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-04T15:24:18,914 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/e178d8169cb648bda8b3fd47093eae45 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/e178d8169cb648bda8b3fd47093eae45 2024-12-04T15:24:18,919 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5345b80b290e2620248a8fde2595e371/C of 5345b80b290e2620248a8fde2595e371 into e178d8169cb648bda8b3fd47093eae45(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:24:18,919 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:18,919 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/C, priority=12, startTime=1733325858747; duration=0sec 2024-12-04T15:24:18,919 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:18,919 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:C 2024-12-04T15:24:18,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742527_1703 (size=12301) 2024-12-04T15:24:18,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:18,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325918924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:18,926 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/dfc794dc0d084a7e8d5b75ea1c169b7c 2024-12-04T15:24:18,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:18,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325918924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:18,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:18,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325918924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:18,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/6831244852d54d138dabad2e31faf3e8 is 50, key is test_row_0/C:col10/1733325857662/Put/seqid=0 2024-12-04T15:24:18,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742528_1704 (size=12301) 2024-12-04T15:24:18,955 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/6831244852d54d138dabad2e31faf3e8 2024-12-04T15:24:18,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/410f09271e894e5a86a8aa859d87bf71 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/410f09271e894e5a86a8aa859d87bf71 2024-12-04T15:24:18,966 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/410f09271e894e5a86a8aa859d87bf71, entries=150, sequenceid=274, filesize=30.5 K 2024-12-04T15:24:18,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/dfc794dc0d084a7e8d5b75ea1c169b7c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/dfc794dc0d084a7e8d5b75ea1c169b7c 2024-12-04T15:24:18,971 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/dfc794dc0d084a7e8d5b75ea1c169b7c, entries=150, sequenceid=274, filesize=12.0 K 2024-12-04T15:24:18,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/6831244852d54d138dabad2e31faf3e8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/6831244852d54d138dabad2e31faf3e8 2024-12-04T15:24:18,977 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/6831244852d54d138dabad2e31faf3e8, entries=150, sequenceid=274, filesize=12.0 K 2024-12-04T15:24:18,978 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 5345b80b290e2620248a8fde2595e371 in 218ms, sequenceid=274, compaction requested=false 2024-12-04T15:24:18,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:18,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:18,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-12-04T15:24:18,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-12-04T15:24:18,982 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-12-04T15:24:18,982 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 679 msec 2024-12-04T15:24:18,985 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 684 msec 2024-12-04T15:24:19,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:19,131 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-04T15:24:19,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:19,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:19,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:19,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:19,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:19,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:19,144 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412047ebb9d58a2e74097b17e4974714eaac6_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325859129/Put/seqid=0 2024-12-04T15:24:19,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742529_1705 (size=14994) 2024-12-04T15:24:19,164 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:19,168 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412047ebb9d58a2e74097b17e4974714eaac6_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412047ebb9d58a2e74097b17e4974714eaac6_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:19,169 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/3f956f958fed48f59494a1ff0a4ed059, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:19,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/3f956f958fed48f59494a1ff0a4ed059 is 175, key is test_row_0/A:col10/1733325859129/Put/seqid=0 2024-12-04T15:24:19,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:19,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325919166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742530_1706 (size=39949) 2024-12-04T15:24:19,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:19,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325919172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,177 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=291, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/3f956f958fed48f59494a1ff0a4ed059 2024-12-04T15:24:19,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:19,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325919172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,187 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/8a1c98a88e644d1c82d44989c8310bc4 is 50, key is test_row_0/B:col10/1733325859129/Put/seqid=0 2024-12-04T15:24:19,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742531_1707 (size=12301) 2024-12-04T15:24:19,263 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#A#compaction#592 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:19,264 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/5f2637c6132a45a4b49727d921e4bfe5 is 175, key is test_row_0/A:col10/1733325855466/Put/seqid=0 2024-12-04T15:24:19,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:19,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325919273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:19,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325919278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:19,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325919278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742532_1708 (size=31685) 2024-12-04T15:24:19,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-04T15:24:19,408 INFO [Thread-2765 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-12-04T15:24:19,410 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:24:19,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-12-04T15:24:19,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-04T15:24:19,413 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:24:19,414 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:24:19,414 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:24:19,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:19,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325919477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:19,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325919481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:19,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325919481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-04T15:24:19,565 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-04T15:24:19,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:19,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:19,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:19,566 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:19,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:19,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:19,612 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/8a1c98a88e644d1c82d44989c8310bc4 2024-12-04T15:24:19,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/419a39a8b2044c0b828fafec533535e8 is 50, key is test_row_0/C:col10/1733325859129/Put/seqid=0 2024-12-04T15:24:19,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742533_1709 (size=12301) 2024-12-04T15:24:19,697 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/419a39a8b2044c0b828fafec533535e8 2024-12-04T15:24:19,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-04T15:24:19,721 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/5f2637c6132a45a4b49727d921e4bfe5 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/5f2637c6132a45a4b49727d921e4bfe5 2024-12-04T15:24:19,724 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,728 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-04T15:24:19,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:19,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:19,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:19,732 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:19,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:19,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/3f956f958fed48f59494a1ff0a4ed059 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/3f956f958fed48f59494a1ff0a4ed059 2024-12-04T15:24:19,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:19,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/3f956f958fed48f59494a1ff0a4ed059, entries=200, sequenceid=291, filesize=39.0 K 2024-12-04T15:24:19,748 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/8a1c98a88e644d1c82d44989c8310bc4 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/8a1c98a88e644d1c82d44989c8310bc4 2024-12-04T15:24:19,750 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5345b80b290e2620248a8fde2595e371/A of 5345b80b290e2620248a8fde2595e371 into 5f2637c6132a45a4b49727d921e4bfe5(size=30.9 K), total size for store is 100.5 K. This selection was in queue for 0sec, and took 1sec to execute. 
2024-12-04T15:24:19,750 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:19,750 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/A, priority=12, startTime=1733325858747; duration=1sec 2024-12-04T15:24:19,750 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:19,750 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:A 2024-12-04T15:24:19,751 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/8a1c98a88e644d1c82d44989c8310bc4, entries=150, sequenceid=291, filesize=12.0 K 2024-12-04T15:24:19,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/419a39a8b2044c0b828fafec533535e8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/419a39a8b2044c0b828fafec533535e8 2024-12-04T15:24:19,756 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/419a39a8b2044c0b828fafec533535e8, entries=150, sequenceid=291, filesize=12.0 K 2024-12-04T15:24:19,757 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 5345b80b290e2620248a8fde2595e371 in 625ms, sequenceid=291, compaction requested=true 2024-12-04T15:24:19,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:19,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:24:19,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:19,757 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:24:19,757 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:24:19,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:24:19,757 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:19,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:24:19,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:24:19,758 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102889 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:24:19,758 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/A is initiating minor compaction (all files) 2024-12-04T15:24:19,758 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/A in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:19,758 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/5f2637c6132a45a4b49727d921e4bfe5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/410f09271e894e5a86a8aa859d87bf71, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/3f956f958fed48f59494a1ff0a4ed059] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=100.5 K 2024-12-04T15:24:19,758 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:19,758 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/5f2637c6132a45a4b49727d921e4bfe5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/410f09271e894e5a86a8aa859d87bf71, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/3f956f958fed48f59494a1ff0a4ed059] 2024-12-04T15:24:19,759 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:24:19,759 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/B is initiating minor compaction (all files) 2024-12-04T15:24:19,759 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/B in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:19,759 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/15f0bd05e51b42e2a28e430e2705d476, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/dfc794dc0d084a7e8d5b75ea1c169b7c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/8a1c98a88e644d1c82d44989c8310bc4] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=36.5 K 2024-12-04T15:24:19,760 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f2637c6132a45a4b49727d921e4bfe5, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733325855463 2024-12-04T15:24:19,763 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 410f09271e894e5a86a8aa859d87bf71, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733325857660 2024-12-04T15:24:19,763 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 15f0bd05e51b42e2a28e430e2705d476, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733325855463 2024-12-04T15:24:19,764 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f956f958fed48f59494a1ff0a4ed059, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733325858808 2024-12-04T15:24:19,764 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting dfc794dc0d084a7e8d5b75ea1c169b7c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733325857660 2024-12-04T15:24:19,765 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 
8a1c98a88e644d1c82d44989c8310bc4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733325858808 2024-12-04T15:24:19,789 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:19,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:19,792 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-04T15:24:19,793 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#B#compaction#601 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:19,793 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241204aee0e3f8dd514aecab581b9e4bf25e3d_5345b80b290e2620248a8fde2595e371 store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:19,793 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/ed11bdbdf64e4160970d26505eb78799 is 50, key is test_row_0/B:col10/1733325859129/Put/seqid=0 2024-12-04T15:24:19,794 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241204aee0e3f8dd514aecab581b9e4bf25e3d_5345b80b290e2620248a8fde2595e371, store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:19,795 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204aee0e3f8dd514aecab581b9e4bf25e3d_5345b80b290e2620248a8fde2595e371 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:19,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:19,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:19,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:19,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:19,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:19,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:19,810 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742534_1710 (size=4469) 2024-12-04T15:24:19,812 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#A#compaction#600 average throughput is 1.11 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:19,812 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/07900a34941644e2911536d774d9bdd2 is 175, key is test_row_0/A:col10/1733325859129/Put/seqid=0 2024-12-04T15:24:19,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:19,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325919817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:19,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325919817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,821 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:19,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325919817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,852 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204fb5f2946349d4c47886e2914dcdc3469_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325859790/Put/seqid=0 2024-12-04T15:24:19,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742535_1711 (size=12983) 2024-12-04T15:24:19,878 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/ed11bdbdf64e4160970d26505eb78799 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/ed11bdbdf64e4160970d26505eb78799 2024-12-04T15:24:19,883 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5345b80b290e2620248a8fde2595e371/B of 5345b80b290e2620248a8fde2595e371 into ed11bdbdf64e4160970d26505eb78799(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:24:19,883 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:19,883 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/B, priority=13, startTime=1733325859757; duration=0sec 2024-12-04T15:24:19,883 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:24:19,883 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:B 2024-12-04T15:24:19,883 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T15:24:19,885 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T15:24:19,885 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/C is initiating minor compaction (all files) 2024-12-04T15:24:19,885 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/C in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:19,885 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/e178d8169cb648bda8b3fd47093eae45, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/6831244852d54d138dabad2e31faf3e8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/419a39a8b2044c0b828fafec533535e8] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=36.5 K 2024-12-04T15:24:19,886 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting e178d8169cb648bda8b3fd47093eae45, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733325855463 2024-12-04T15:24:19,886 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6831244852d54d138dabad2e31faf3e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733325857660 2024-12-04T15:24:19,887 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 419a39a8b2044c0b828fafec533535e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733325858808 2024-12-04T15:24:19,888 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-04T15:24:19,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:19,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:19,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:19,890 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:19,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:19,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:19,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742536_1712 (size=31937) 2024-12-04T15:24:19,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742537_1713 (size=12454) 2024-12-04T15:24:19,898 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:19,898 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/07900a34941644e2911536d774d9bdd2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/07900a34941644e2911536d774d9bdd2 2024-12-04T15:24:19,902 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#C#compaction#603 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:19,902 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/7f226f71c159433a9543e80f6c7ed102 is 50, key is test_row_0/C:col10/1733325859129/Put/seqid=0 2024-12-04T15:24:19,903 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204fb5f2946349d4c47886e2914dcdc3469_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204fb5f2946349d4c47886e2914dcdc3469_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:19,905 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/543ac59396f44108bd9890e2984ce76a, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:19,905 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/543ac59396f44108bd9890e2984ce76a is 175, key is test_row_0/A:col10/1733325859790/Put/seqid=0 2024-12-04T15:24:19,908 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5345b80b290e2620248a8fde2595e371/A of 5345b80b290e2620248a8fde2595e371 into 07900a34941644e2911536d774d9bdd2(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:24:19,908 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:19,908 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/A, priority=13, startTime=1733325859757; duration=0sec 2024-12-04T15:24:19,908 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:19,908 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:A 2024-12-04T15:24:19,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742538_1714 (size=12983) 2024-12-04T15:24:19,917 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/7f226f71c159433a9543e80f6c7ed102 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/7f226f71c159433a9543e80f6c7ed102 2024-12-04T15:24:19,922 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5345b80b290e2620248a8fde2595e371/C of 5345b80b290e2620248a8fde2595e371 into 7f226f71c159433a9543e80f6c7ed102(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:24:19,922 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:19,922 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/C, priority=13, startTime=1733325859757; duration=0sec 2024-12-04T15:24:19,922 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:19,922 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:C 2024-12-04T15:24:19,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:19,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325919921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:19,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325919922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:19,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325919922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:19,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742539_1715 (size=31255) 2024-12-04T15:24:19,941 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=316, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/543ac59396f44108bd9890e2984ce76a 2024-12-04T15:24:19,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/b35ca73473e84ea099dd4991d4e20503 is 50, key is test_row_0/B:col10/1733325859790/Put/seqid=0 2024-12-04T15:24:19,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742540_1716 (size=12301) 2024-12-04T15:24:19,970 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/b35ca73473e84ea099dd4991d4e20503 2024-12-04T15:24:19,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/55d4a7c5e6024a91ad6ccd039f7f39d8 is 50, key is test_row_0/C:col10/1733325859790/Put/seqid=0 2024-12-04T15:24:19,994 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742541_1717 (size=12301) 2024-12-04T15:24:19,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/55d4a7c5e6024a91ad6ccd039f7f39d8 2024-12-04T15:24:19,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/543ac59396f44108bd9890e2984ce76a as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/543ac59396f44108bd9890e2984ce76a 2024-12-04T15:24:20,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/543ac59396f44108bd9890e2984ce76a, entries=150, sequenceid=316, filesize=30.5 K 2024-12-04T15:24:20,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/b35ca73473e84ea099dd4991d4e20503 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/b35ca73473e84ea099dd4991d4e20503 2024-12-04T15:24:20,008 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/b35ca73473e84ea099dd4991d4e20503, entries=150, sequenceid=316, filesize=12.0 K 2024-12-04T15:24:20,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/55d4a7c5e6024a91ad6ccd039f7f39d8 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/55d4a7c5e6024a91ad6ccd039f7f39d8 2024-12-04T15:24:20,014 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/55d4a7c5e6024a91ad6ccd039f7f39d8, entries=150, sequenceid=316, filesize=12.0 K 2024-12-04T15:24:20,016 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 5345b80b290e2620248a8fde2595e371 in 224ms, sequenceid=316, compaction requested=false 2024-12-04T15:24:20,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:20,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-04T15:24:20,044 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:20,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-04T15:24:20,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:20,044 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-04T15:24:20,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:20,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:20,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:20,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:20,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:20,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:20,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120447da12f5290948e2a10af3ace8b21b62_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325859799/Put/seqid=0 2024-12-04T15:24:20,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742542_1718 (size=12454) 2024-12-04T15:24:20,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:20,105 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120447da12f5290948e2a10af3ace8b21b62_5345b80b290e2620248a8fde2595e371 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120447da12f5290948e2a10af3ace8b21b62_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:20,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/b93d07ebaf544b6f8e9edad76af7dd42, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:20,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/b93d07ebaf544b6f8e9edad76af7dd42 is 175, key is test_row_0/A:col10/1733325859799/Put/seqid=0 2024-12-04T15:24:20,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742543_1719 (size=31255) 2024-12-04T15:24:20,116 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=331, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/b93d07ebaf544b6f8e9edad76af7dd42 2024-12-04T15:24:20,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/d2f537b3c41e466280a9bde64baaef37 is 50, key is test_row_0/B:col10/1733325859799/Put/seqid=0 2024-12-04T15:24:20,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:20,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
as already flushing 2024-12-04T15:24:20,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742544_1720 (size=12301) 2024-12-04T15:24:20,134 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/d2f537b3c41e466280a9bde64baaef37 2024-12-04T15:24:20,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/981b0aac13794fc8aa958e3204f10dda is 50, key is test_row_0/C:col10/1733325859799/Put/seqid=0 2024-12-04T15:24:20,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742545_1721 (size=12301) 2024-12-04T15:24:20,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:20,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325920174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:20,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:20,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325920176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:20,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:20,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325920178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:20,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:20,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325920281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:20,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:20,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325920282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:20,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:20,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325920282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:20,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:20,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325920485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:20,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:20,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325920487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:20,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:20,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325920496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:20,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-04T15:24:20,561 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/981b0aac13794fc8aa958e3204f10dda 2024-12-04T15:24:20,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/b93d07ebaf544b6f8e9edad76af7dd42 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/b93d07ebaf544b6f8e9edad76af7dd42 2024-12-04T15:24:20,645 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/b93d07ebaf544b6f8e9edad76af7dd42, entries=150, sequenceid=331, filesize=30.5 K 2024-12-04T15:24:20,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/d2f537b3c41e466280a9bde64baaef37 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/d2f537b3c41e466280a9bde64baaef37 2024-12-04T15:24:20,674 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/d2f537b3c41e466280a9bde64baaef37, entries=150, sequenceid=331, filesize=12.0 K 2024-12-04T15:24:20,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/981b0aac13794fc8aa958e3204f10dda as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/981b0aac13794fc8aa958e3204f10dda 2024-12-04T15:24:20,725 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/981b0aac13794fc8aa958e3204f10dda, entries=150, sequenceid=331, filesize=12.0 K 2024-12-04T15:24:20,726 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 5345b80b290e2620248a8fde2595e371 in 682ms, sequenceid=331, compaction requested=true 2024-12-04T15:24:20,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:20,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:20,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-12-04T15:24:20,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-12-04T15:24:20,729 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-12-04T15:24:20,729 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3130 sec 2024-12-04T15:24:20,731 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 1.3190 sec 2024-12-04T15:24:20,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:20,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-04T15:24:20,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:20,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:20,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:20,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:20,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:20,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:20,848 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204dfef3907570c4a5c983b73b2232e4b94_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325860795/Put/seqid=0 2024-12-04T15:24:20,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:20,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325920860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:20,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:20,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325920861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:20,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:20,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325920869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:20,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742546_1722 (size=12454) 2024-12-04T15:24:20,925 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:20,951 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204dfef3907570c4a5c983b73b2232e4b94_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204dfef3907570c4a5c983b73b2232e4b94_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:20,955 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/43653734a2014222884f3d4d40f91531, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:20,956 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/43653734a2014222884f3d4d40f91531 is 175, key is test_row_0/A:col10/1733325860795/Put/seqid=0 2024-12-04T15:24:20,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:20,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325920968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:20,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:20,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325920970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:20,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:20,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325920978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:21,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742547_1723 (size=31255) 2024-12-04T15:24:21,009 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=356, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/43653734a2014222884f3d4d40f91531 2024-12-04T15:24:21,047 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/176a35d3f144485488536f4c2c50115b is 50, key is test_row_0/B:col10/1733325860795/Put/seqid=0 2024-12-04T15:24:21,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742548_1724 (size=12301) 2024-12-04T15:24:21,092 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/176a35d3f144485488536f4c2c50115b 2024-12-04T15:24:21,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/273922188d78448d8af93f342e536f60 is 50, key is test_row_0/C:col10/1733325860795/Put/seqid=0 2024-12-04T15:24:21,173 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742549_1725 (size=12301) 2024-12-04T15:24:21,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:21,177 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/273922188d78448d8af93f342e536f60 2024-12-04T15:24:21,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325921172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:21,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:21,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325921174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:21,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:21,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325921192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:21,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/43653734a2014222884f3d4d40f91531 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/43653734a2014222884f3d4d40f91531 2024-12-04T15:24:21,235 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/43653734a2014222884f3d4d40f91531, entries=150, sequenceid=356, filesize=30.5 K 2024-12-04T15:24:21,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/176a35d3f144485488536f4c2c50115b as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/176a35d3f144485488536f4c2c50115b 2024-12-04T15:24:21,262 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/176a35d3f144485488536f4c2c50115b, entries=150, sequenceid=356, filesize=12.0 K 2024-12-04T15:24:21,265 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/273922188d78448d8af93f342e536f60 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/273922188d78448d8af93f342e536f60 2024-12-04T15:24:21,269 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/273922188d78448d8af93f342e536f60, entries=150, sequenceid=356, filesize=12.0 K 2024-12-04T15:24:21,284 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 
KB/61830 for 5345b80b290e2620248a8fde2595e371 in 480ms, sequenceid=356, compaction requested=true 2024-12-04T15:24:21,284 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:21,284 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:24:21,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:24:21,285 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 125702 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:24:21,285 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/A is initiating minor compaction (all files) 2024-12-04T15:24:21,285 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/A in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:21,286 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/07900a34941644e2911536d774d9bdd2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/543ac59396f44108bd9890e2984ce76a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/b93d07ebaf544b6f8e9edad76af7dd42, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/43653734a2014222884f3d4d40f91531] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=122.8 K 2024-12-04T15:24:21,286 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:21,286 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/07900a34941644e2911536d774d9bdd2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/543ac59396f44108bd9890e2984ce76a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/b93d07ebaf544b6f8e9edad76af7dd42, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/43653734a2014222884f3d4d40f91531] 2024-12-04T15:24:21,286 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07900a34941644e2911536d774d9bdd2, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733325858808 2024-12-04T15:24:21,286 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 543ac59396f44108bd9890e2984ce76a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733325859170 2024-12-04T15:24:21,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:21,286 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:24:21,287 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b93d07ebaf544b6f8e9edad76af7dd42, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733325859799 2024-12-04T15:24:21,287 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43653734a2014222884f3d4d40f91531, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733325860172 2024-12-04T15:24:21,288 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:24:21,288 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/B is initiating minor compaction (all files) 2024-12-04T15:24:21,288 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/B in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:21,289 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/ed11bdbdf64e4160970d26505eb78799, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/b35ca73473e84ea099dd4991d4e20503, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/d2f537b3c41e466280a9bde64baaef37, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/176a35d3f144485488536f4c2c50115b] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=48.7 K 2024-12-04T15:24:21,289 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting ed11bdbdf64e4160970d26505eb78799, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733325858808 2024-12-04T15:24:21,290 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting b35ca73473e84ea099dd4991d4e20503, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733325859170 2024-12-04T15:24:21,290 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting d2f537b3c41e466280a9bde64baaef37, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733325859799 2024-12-04T15:24:21,291 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 176a35d3f144485488536f4c2c50115b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733325860172 2024-12-04T15:24:21,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:24:21,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:21,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:C, priority=-2147483648, current under compaction store size is 3 2024-12-04T15:24:21,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:24:21,300 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:21,306 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#B#compaction#613 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:21,306 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/1f7c750f5c044ed4947c4ff454b92986 is 50, key is test_row_0/B:col10/1733325860795/Put/seqid=0 2024-12-04T15:24:21,308 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241204064c9e0fdcff4c65b6c1783ecdffa7a9_5345b80b290e2620248a8fde2595e371 store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:21,311 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241204064c9e0fdcff4c65b6c1783ecdffa7a9_5345b80b290e2620248a8fde2595e371, store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:21,311 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204064c9e0fdcff4c65b6c1783ecdffa7a9_5345b80b290e2620248a8fde2595e371 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:21,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742550_1726 (size=13119) 2024-12-04T15:24:21,356 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/1f7c750f5c044ed4947c4ff454b92986 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/1f7c750f5c044ed4947c4ff454b92986 2024-12-04T15:24:21,368 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5345b80b290e2620248a8fde2595e371/B of 5345b80b290e2620248a8fde2595e371 into 1f7c750f5c044ed4947c4ff454b92986(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:24:21,368 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:21,368 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/B, priority=12, startTime=1733325861286; duration=0sec 2024-12-04T15:24:21,368 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:24:21,368 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:B 2024-12-04T15:24:21,368 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:24:21,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742551_1727 (size=4469) 2024-12-04T15:24:21,372 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:24:21,372 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/C is initiating minor compaction (all files) 2024-12-04T15:24:21,373 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/C in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:21,373 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/7f226f71c159433a9543e80f6c7ed102, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/55d4a7c5e6024a91ad6ccd039f7f39d8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/981b0aac13794fc8aa958e3204f10dda, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/273922188d78448d8af93f342e536f60] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=48.7 K 2024-12-04T15:24:21,373 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f226f71c159433a9543e80f6c7ed102, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733325858808 2024-12-04T15:24:21,373 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 55d4a7c5e6024a91ad6ccd039f7f39d8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733325859170 2024-12-04T15:24:21,374 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 981b0aac13794fc8aa958e3204f10dda, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733325859799 2024-12-04T15:24:21,374 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 273922188d78448d8af93f342e536f60, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733325860172 2024-12-04T15:24:21,384 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#A#compaction#612 average throughput is 0.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:21,385 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#C#compaction#614 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:21,386 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/bcfaf2d617d54dc8ba5bedb2126db50d is 175, key is test_row_0/A:col10/1733325860795/Put/seqid=0 2024-12-04T15:24:21,387 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/fe7bde190ac24cbd8f248bee5a393324 is 50, key is test_row_0/C:col10/1733325860795/Put/seqid=0 2024-12-04T15:24:21,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742552_1728 (size=13119) 2024-12-04T15:24:21,441 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/fe7bde190ac24cbd8f248bee5a393324 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/fe7bde190ac24cbd8f248bee5a393324 2024-12-04T15:24:21,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742553_1729 (size=32073) 2024-12-04T15:24:21,456 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5345b80b290e2620248a8fde2595e371/C of 5345b80b290e2620248a8fde2595e371 into fe7bde190ac24cbd8f248bee5a393324(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:24:21,456 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:21,456 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/C, priority=12, startTime=1733325861298; duration=0sec 2024-12-04T15:24:21,456 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:21,456 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:C 2024-12-04T15:24:21,497 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-04T15:24:21,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:21,500 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/bcfaf2d617d54dc8ba5bedb2126db50d as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/bcfaf2d617d54dc8ba5bedb2126db50d 2024-12-04T15:24:21,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:21,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:21,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:21,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:21,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:21,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:21,514 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5345b80b290e2620248a8fde2595e371/A of 5345b80b290e2620248a8fde2595e371 into bcfaf2d617d54dc8ba5bedb2126db50d(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:24:21,514 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:21,514 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/A, priority=12, startTime=1733325861284; duration=0sec 2024-12-04T15:24:21,514 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:21,514 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:A 2024-12-04T15:24:21,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-04T15:24:21,524 INFO [Thread-2765 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-12-04T15:24:21,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412044a3ee12c4d744ff5895aace530d1f0ee_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325860842/Put/seqid=0 2024-12-04T15:24:21,536 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:24:21,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-12-04T15:24:21,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-04T15:24:21,557 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:24:21,558 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:24:21,558 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:24:21,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742554_1730 (size=12454) 2024-12-04T15:24:21,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:21,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325921618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:21,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:21,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325921621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:21,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:21,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325921620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:21,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-04T15:24:21,712 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:21,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-04T15:24:21,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:21,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:21,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:21,713 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:21,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:21,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:21,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:21,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325921724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:21,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:21,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325921732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:21,740 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:21,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325921736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:21,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-04T15:24:21,872 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:21,873 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-04T15:24:21,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:21,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:21,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:21,876 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:21,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:21,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:21,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:21,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325921929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:21,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:21,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325921948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:21,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:21,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325921948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:21,966 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:21,970 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412044a3ee12c4d744ff5895aace530d1f0ee_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412044a3ee12c4d744ff5895aace530d1f0ee_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:21,971 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/8aa39b16ac774ee8bb5203f21e04ed93, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:21,971 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/8aa39b16ac774ee8bb5203f21e04ed93 is 175, key is test_row_0/A:col10/1733325860842/Put/seqid=0 2024-12-04T15:24:22,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742555_1731 (size=31255) 2024-12-04T15:24:22,035 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:22,035 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-04T15:24:22,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:22,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
as already flushing 2024-12-04T15:24:22,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:22,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:22,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:22,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:22,040 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=372, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/8aa39b16ac774ee8bb5203f21e04ed93 2024-12-04T15:24:22,077 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/0369d2b1355d4a6ea82d2b20abe05694 is 50, key is test_row_0/B:col10/1733325860842/Put/seqid=0 2024-12-04T15:24:22,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742556_1732 (size=12301) 2024-12-04T15:24:22,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-04T15:24:22,192 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:22,192 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-04T15:24:22,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:22,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:22,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:22,196 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:22,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:22,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:22,216 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-04T15:24:22,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:22,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325922236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:22,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:22,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325922253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:22,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:22,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325922268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:22,349 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:22,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-04T15:24:22,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:22,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:22,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:22,351 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:22,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:22,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:22,509 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:22,509 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-04T15:24:22,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:22,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:22,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:22,510 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:22,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:22,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:22,536 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/0369d2b1355d4a6ea82d2b20abe05694 2024-12-04T15:24:22,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/1e146893d79d43318b0d40a7c0046ead is 50, key is test_row_0/C:col10/1733325860842/Put/seqid=0 2024-12-04T15:24:22,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742557_1733 (size=12301) 2024-12-04T15:24:22,596 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/1e146893d79d43318b0d40a7c0046ead 2024-12-04T15:24:22,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/8aa39b16ac774ee8bb5203f21e04ed93 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/8aa39b16ac774ee8bb5203f21e04ed93 2024-12-04T15:24:22,603 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/8aa39b16ac774ee8bb5203f21e04ed93, entries=150, sequenceid=372, filesize=30.5 K 2024-12-04T15:24:22,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/0369d2b1355d4a6ea82d2b20abe05694 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0369d2b1355d4a6ea82d2b20abe05694 2024-12-04T15:24:22,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0369d2b1355d4a6ea82d2b20abe05694, entries=150, sequenceid=372, filesize=12.0 K 2024-12-04T15:24:22,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/1e146893d79d43318b0d40a7c0046ead as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/1e146893d79d43318b0d40a7c0046ead 2024-12-04T15:24:22,617 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/1e146893d79d43318b0d40a7c0046ead, entries=150, sequenceid=372, filesize=12.0 K 2024-12-04T15:24:22,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 5345b80b290e2620248a8fde2595e371 in 1124ms, sequenceid=372, compaction requested=false 2024-12-04T15:24:22,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:22,663 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:22,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-04T15:24:22,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:22,665 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-04T15:24:22,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:22,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:22,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:22,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:22,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:22,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:22,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-04T15:24:22,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204f12d8dde348849638b40ef6ad451fa9e_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325861618/Put/seqid=0 2024-12-04T15:24:22,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is 
added to blk_1073742558_1734 (size=12454) 2024-12-04T15:24:22,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:22,716 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204f12d8dde348849638b40ef6ad451fa9e_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204f12d8dde348849638b40ef6ad451fa9e_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:22,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/0fda484090774713bcecf104c8672b14, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:22,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/0fda484090774713bcecf104c8672b14 is 175, key is test_row_0/A:col10/1733325861618/Put/seqid=0 2024-12-04T15:24:22,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742559_1735 (size=31255) 2024-12-04T15:24:22,765 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=395, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/0fda484090774713bcecf104c8672b14 2024-12-04T15:24:22,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:22,769 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:22,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/0ca6c24809134fdbb4a597b2bf3ff367 is 50, key is test_row_0/B:col10/1733325861618/Put/seqid=0 2024-12-04T15:24:22,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:22,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325922804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:22,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:22,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325922805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:22,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:22,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325922805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:22,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742560_1736 (size=12301) 2024-12-04T15:24:22,818 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/0ca6c24809134fdbb4a597b2bf3ff367 2024-12-04T15:24:22,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/17f4ff5fb6cd42c0b7fedc8c4cb0f7b7 is 50, key is test_row_0/C:col10/1733325861618/Put/seqid=0 2024-12-04T15:24:22,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742561_1737 (size=12301) 2024-12-04T15:24:22,861 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/17f4ff5fb6cd42c0b7fedc8c4cb0f7b7 2024-12-04T15:24:22,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/0fda484090774713bcecf104c8672b14 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/0fda484090774713bcecf104c8672b14 2024-12-04T15:24:22,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:22,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325922911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:22,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:22,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325922911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:22,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:22,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325922911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:22,925 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/0fda484090774713bcecf104c8672b14, entries=150, sequenceid=395, filesize=30.5 K 2024-12-04T15:24:22,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/0ca6c24809134fdbb4a597b2bf3ff367 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0ca6c24809134fdbb4a597b2bf3ff367 2024-12-04T15:24:22,933 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0ca6c24809134fdbb4a597b2bf3ff367, entries=150, sequenceid=395, filesize=12.0 K 2024-12-04T15:24:22,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/17f4ff5fb6cd42c0b7fedc8c4cb0f7b7 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/17f4ff5fb6cd42c0b7fedc8c4cb0f7b7 2024-12-04T15:24:22,940 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/17f4ff5fb6cd42c0b7fedc8c4cb0f7b7, entries=150, sequenceid=395, filesize=12.0 K 2024-12-04T15:24:22,944 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 5345b80b290e2620248a8fde2595e371 in 279ms, 
sequenceid=395, compaction requested=true 2024-12-04T15:24:22,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:22,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:22,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-12-04T15:24:22,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-12-04T15:24:22,976 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-12-04T15:24:22,976 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4030 sec 2024-12-04T15:24:22,980 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 1.4410 sec 2024-12-04T15:24:23,120 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-04T15:24:23,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:23,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:23,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:23,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:23,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:23,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:23,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:23,144 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204342bcf4585634c9c925a72c6dfe787c4_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325862781/Put/seqid=0 2024-12-04T15:24:23,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40320 deadline: 1733325923158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325923159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
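The 512.0 K figure in these warnings is the region's blocking memstore size, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; once a region's memstore exceeds it, checkResources throws RegionTooBusyException until a flush catches up. A small sketch of how the two settings combine follows; the concrete values are illustrative (this test presumably lowers the flush size well below the 128 MB default to reach a 512 K limit), not values read from the test configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush threshold and the multiplier that defines the blocking limit.
    // Illustrative values: 128 K * 4 = 512 K, consistent with the limit in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes block once a region's memstore exceeds "
        + (flushSize * multiplier) + " bytes");
  }
}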
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325923163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325923168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1733325923173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742562_1738 (size=12454) 2024-12-04T15:24:23,204 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:23,256 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204342bcf4585634c9c925a72c6dfe787c4_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204342bcf4585634c9c925a72c6dfe787c4_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:23,263 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/29acdd61886b466387467b9a0e9baee6, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:23,263 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/29acdd61886b466387467b9a0e9baee6 is 175, key is test_row_0/A:col10/1733325862781/Put/seqid=0 2024-12-04T15:24:23,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40320 deadline: 1733325923273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325923273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325923279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325923279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
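Family A of this table is MOB-enabled, which is why each flush of A above first renames a d41d8cd98f00b204e9800998ecf8427e… mob file under mobdir/ before committing the store file under the region directory. A sketch of how such a family might be declared through the Admin API follows; the MOB threshold and the createTable call are illustrative assumptions for a plain HBase 2.x client, not the schema setup actually used by TestAcidGuarantees.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Values in family "A" larger than the threshold are written as MOB files,
      // which is the path DefaultMobStoreFlusher takes in the flushes above.
      ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100L)   // illustrative threshold in bytes
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setColumnFamily(mobFamily)
          .build();
      admin.createTable(table);
    }
  }
}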
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1733325923284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742563_1739 (size=31255) 2024-12-04T15:24:23,322 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=411, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/29acdd61886b466387467b9a0e9baee6 2024-12-04T15:24:23,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/bd58f0c637a447268cd641fe6bdbec7c is 50, key is test_row_0/B:col10/1733325862781/Put/seqid=0 2024-12-04T15:24:23,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742564_1740 (size=12301) 2024-12-04T15:24:23,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40320 deadline: 1733325923485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325923495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325923496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325923495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,508 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/bd58f0c637a447268cd641fe6bdbec7c 2024-12-04T15:24:23,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1733325923511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/db997a85cf5048dfbfd70f3c67d1404e is 50, key is test_row_0/C:col10/1733325862781/Put/seqid=0 2024-12-04T15:24:23,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742565_1741 (size=12301) 2024-12-04T15:24:23,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-04T15:24:23,669 INFO [Thread-2765 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-12-04T15:24:23,697 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-04T15:24:23,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-12-04T15:24:23,705 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:24:23,706 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:24:23,706 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:24:23,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-04T15:24:23,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-04T15:24:23,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
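The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" entry and the FlushTableProcedure/FlushRegionProcedure pids around it correspond to an Admin-level flush request like the sketch below. It assumes a plain HBase 2.x client and shows only the call that enqueues the procedure on the master; connection details for the test cluster are omitted.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // In this build the flush runs as a master procedure; the client-side future
      // completes once the procedure finishes (cf. "procId: 165 completed" above).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}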
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40320 deadline: 1733325923820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325923820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1733325923821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:23,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325923821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325923821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,860 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:23,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-04T15:24:23,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:23,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:23,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:23,863 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:23,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:23,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:24,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-04T15:24:24,018 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,019 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-04T15:24:24,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:24,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:24,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:24,019 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:24,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:24,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:24,044 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/db997a85cf5048dfbfd70f3c67d1404e 2024-12-04T15:24:24,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/29acdd61886b466387467b9a0e9baee6 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/29acdd61886b466387467b9a0e9baee6 2024-12-04T15:24:24,132 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/29acdd61886b466387467b9a0e9baee6, entries=150, sequenceid=411, filesize=30.5 K 2024-12-04T15:24:24,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/bd58f0c637a447268cd641fe6bdbec7c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/bd58f0c637a447268cd641fe6bdbec7c 2024-12-04T15:24:24,173 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/bd58f0c637a447268cd641fe6bdbec7c, entries=150, sequenceid=411, filesize=12.0 K 2024-12-04T15:24:24,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/db997a85cf5048dfbfd70f3c67d1404e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/db997a85cf5048dfbfd70f3c67d1404e 2024-12-04T15:24:24,184 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,184 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-04T15:24:24,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:24,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
as already flushing 2024-12-04T15:24:24,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:24,187 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:24,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:24,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:24,219 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/db997a85cf5048dfbfd70f3c67d1404e, entries=150, sequenceid=411, filesize=12.0 K 2024-12-04T15:24:24,220 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 5345b80b290e2620248a8fde2595e371 in 1100ms, sequenceid=411, compaction requested=true 2024-12-04T15:24:24,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:24,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:A, priority=-2147483648, current under compaction store size is 1 2024-12-04T15:24:24,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:24,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:B, priority=-2147483648, current under compaction store size is 2 2024-12-04T15:24:24,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:24,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5345b80b290e2620248a8fde2595e371:C, priority=-2147483648, current under compaction store size is 3 
2024-12-04T15:24:24,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:24:24,221 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:24:24,221 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:24:24,236 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:24:24,236 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/B is initiating minor compaction (all files) 2024-12-04T15:24:24,236 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/B in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:24,237 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/1f7c750f5c044ed4947c4ff454b92986, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0369d2b1355d4a6ea82d2b20abe05694, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0ca6c24809134fdbb4a597b2bf3ff367, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/bd58f0c637a447268cd641fe6bdbec7c] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=48.8 K 2024-12-04T15:24:24,239 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 125838 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:24:24,239 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/A is initiating minor compaction (all files) 2024-12-04T15:24:24,239 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/A in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:24,240 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/bcfaf2d617d54dc8ba5bedb2126db50d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/8aa39b16ac774ee8bb5203f21e04ed93, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/0fda484090774713bcecf104c8672b14, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/29acdd61886b466387467b9a0e9baee6] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=122.9 K 2024-12-04T15:24:24,240 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:24,240 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. files: [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/bcfaf2d617d54dc8ba5bedb2126db50d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/8aa39b16ac774ee8bb5203f21e04ed93, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/0fda484090774713bcecf104c8672b14, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/29acdd61886b466387467b9a0e9baee6] 2024-12-04T15:24:24,240 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f7c750f5c044ed4947c4ff454b92986, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733325860172 2024-12-04T15:24:24,244 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0369d2b1355d4a6ea82d2b20abe05694, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1733325860842 2024-12-04T15:24:24,244 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ca6c24809134fdbb4a597b2bf3ff367, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733325861548 2024-12-04T15:24:24,248 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcfaf2d617d54dc8ba5bedb2126db50d, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733325860172 2024-12-04T15:24:24,248 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] 
compactions.Compactor(224): Compacting bd58f0c637a447268cd641fe6bdbec7c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733325862781 2024-12-04T15:24:24,249 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8aa39b16ac774ee8bb5203f21e04ed93, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1733325860842 2024-12-04T15:24:24,256 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0fda484090774713bcecf104c8672b14, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733325861548 2024-12-04T15:24:24,261 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29acdd61886b466387467b9a0e9baee6, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733325862781 2024-12-04T15:24:24,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-04T15:24:24,314 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#B#compaction#624 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:24,315 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/3b49e581b50b4521aff3bbe4d6ea4f43 is 50, key is test_row_0/B:col10/1733325862781/Put/seqid=0 2024-12-04T15:24:24,336 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:24,344 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-04T15:24:24,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:24,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:24,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:24,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:24,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:24,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:24,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(8581): Flush requested on 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:24,352 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 
2024-12-04T15:24:24,357 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241204087dce23027a40b7adf0ea0a62a35fc7_5345b80b290e2620248a8fde2595e371 store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:24,361 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241204087dce23027a40b7adf0ea0a62a35fc7_5345b80b290e2620248a8fde2595e371, store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:24,361 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204087dce23027a40b7adf0ea0a62a35fc7_5345b80b290e2620248a8fde2595e371 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:24,364 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-04T15:24:24,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742566_1742 (size=13255) 2024-12-04T15:24:24,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:24,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:24,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
as already flushing 2024-12-04T15:24:24,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325924364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:24,372 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:24,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T15:24:24,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:24,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:24,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325924367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:24,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40320 deadline: 1733325924371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:24,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1733325924372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,386 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204c4ffba01bfd24e488978e49f4341e3f0_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325863166/Put/seqid=0 2024-12-04T15:24:24,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:24,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325924377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,416 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/3b49e581b50b4521aff3bbe4d6ea4f43 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/3b49e581b50b4521aff3bbe4d6ea4f43 2024-12-04T15:24:24,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742567_1743 (size=4469) 2024-12-04T15:24:24,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742568_1744 (size=14994) 2024-12-04T15:24:24,441 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:24,444 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#A#compaction#625 average throughput is 0.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:24,445 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/08f2e692b24b4c13b2da0d2f4841abdd is 175, key is test_row_0/A:col10/1733325862781/Put/seqid=0 2024-12-04T15:24:24,445 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5345b80b290e2620248a8fde2595e371/B of 5345b80b290e2620248a8fde2595e371 into 3b49e581b50b4521aff3bbe4d6ea4f43(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T15:24:24,445 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:24,445 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/B, priority=12, startTime=1733325864221; duration=0sec 2024-12-04T15:24:24,445 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-04T15:24:24,445 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:B 2024-12-04T15:24:24,445 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-04T15:24:24,472 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-04T15:24:24,472 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1540): 5345b80b290e2620248a8fde2595e371/C is initiating minor compaction (all files) 2024-12-04T15:24:24,472 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5345b80b290e2620248a8fde2595e371/C in TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:24,472 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/fe7bde190ac24cbd8f248bee5a393324, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/1e146893d79d43318b0d40a7c0046ead, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/17f4ff5fb6cd42c0b7fedc8c4cb0f7b7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/db997a85cf5048dfbfd70f3c67d1404e] into tmpdir=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp, totalSize=48.8 K 2024-12-04T15:24:24,473 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241204c4ffba01bfd24e488978e49f4341e3f0_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204c4ffba01bfd24e488978e49f4341e3f0_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:24,476 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting fe7bde190ac24cbd8f248bee5a393324, keycount=150, 
bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733325860172 2024-12-04T15:24:24,478 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/6210541a7d4b496699bab91168e15ee2, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:24,479 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/6210541a7d4b496699bab91168e15ee2 is 175, key is test_row_0/A:col10/1733325863166/Put/seqid=0 2024-12-04T15:24:24,479 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e146893d79d43318b0d40a7c0046ead, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1733325860842 2024-12-04T15:24:24,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:24,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325924477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:24,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325924480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:24,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40320 deadline: 1733325924482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,487 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting 17f4ff5fb6cd42c0b7fedc8c4cb0f7b7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733325861548 2024-12-04T15:24:24,489 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] compactions.Compactor(224): Compacting db997a85cf5048dfbfd70f3c67d1404e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733325862781 2024-12-04T15:24:24,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:24,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1733325924488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742569_1745 (size=32209) 2024-12-04T15:24:24,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:24,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325924489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,506 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/08f2e692b24b4c13b2da0d2f4841abdd as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/08f2e692b24b4c13b2da0d2f4841abdd 2024-12-04T15:24:24,511 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5345b80b290e2620248a8fde2595e371/A of 5345b80b290e2620248a8fde2595e371 into 08f2e692b24b4c13b2da0d2f4841abdd(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T15:24:24,512 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:24,512 INFO [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/A, priority=12, startTime=1733325864221; duration=0sec 2024-12-04T15:24:24,512 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:24,512 DEBUG [RS:0;645c2dbfef2e:42169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:A 2024-12-04T15:24:24,540 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-04T15:24:24,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
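[Editorial note] The commit entries a few lines above show a compacted output file being written under the region's .tmp directory and then moved into the A column-family directory (HRegionFileSystem "Committing ... as ..."). Below is a minimal sketch of that write-to-tmp-then-rename pattern using the Hadoop FileSystem API; the paths and the helper class are illustrative only and are not HBase's actual HRegionFileSystem code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
    // Hypothetical helper: move a finished store file from the region's .tmp
    // area into the column-family directory with a single rename, so readers
    // never observe a partially written file.
    static Path commit(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
        Path dst = new Path(familyDir, tmpFile.getName());
        if (!fs.rename(tmpFile, dst)) {
            throw new IOException("Failed to commit " + tmpFile + " to " + dst);
        }
        return dst;
    }

    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        // Illustrative paths modeled on the log; not the real test layout.
        Path tmp = new Path("/data/default/TestAcidGuarantees/region/.tmp/A/compacted-file");
        Path family = new Path("/data/default/TestAcidGuarantees/region/A");
        System.out.println("committed to " + commit(fs, tmp, family));
    }
}
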
2024-12-04T15:24:24,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:24,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:24,542 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:24,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
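[Editorial note] The pid=168 flush procedure above fails on the region server with "NOT flushing ... as already flushing"; the callable turns that into an IOException and the master re-dispatches the procedure until a later attempt (further down in the log) succeeds. Below is a small hypothetical sketch of that retry loop; the class and method names are invented for illustration and are not the real FlushRegionCallable.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

public class FlushRetrySketch {
    // Stand-in for the region: refuses a procedure-driven flush while a
    // MemStoreFlusher-driven flush is still in progress.
    static final AtomicBoolean alreadyFlushing = new AtomicBoolean(true);

    static void flushRegionCallable() throws IOException {
        if (alreadyFlushing.get()) {
            // Mirrors "NOT flushing ... as already flushing" followed by
            // "Unable to complete flush" in the log.
            throw new IOException("Unable to complete flush: already flushing");
        }
        System.out.println("flush completed by procedure");
    }

    public static void main(String[] args) throws Exception {
        for (int attempt = 1; attempt <= 3; attempt++) {
            try {
                flushRegionCallable();
                break;                      // procedure reports success to the master
            } catch (IOException e) {
                System.out.println("attempt " + attempt + " failed: " + e.getMessage());
                Thread.sleep(100);          // master re-dispatches after a delay
                alreadyFlushing.set(false); // the in-flight flush finishes meanwhile
            }
        }
    }
}
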
2024-12-04T15:24:24,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:24,545 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5345b80b290e2620248a8fde2595e371#C#compaction#627 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T15:24:24,546 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/9a85b70a86c0441e8a7b28bcc7991af2 is 50, key is test_row_0/C:col10/1733325862781/Put/seqid=0 2024-12-04T15:24:24,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742570_1746 (size=39949) 2024-12-04T15:24:24,552 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=433, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/6210541a7d4b496699bab91168e15ee2 2024-12-04T15:24:24,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742571_1747 (size=13255) 2024-12-04T15:24:24,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/ea65dfdb9f7f41378b512565781f8748 is 50, key is test_row_0/B:col10/1733325863166/Put/seqid=0 2024-12-04T15:24:24,604 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/9a85b70a86c0441e8a7b28bcc7991af2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/9a85b70a86c0441e8a7b28bcc7991af2 2024-12-04T15:24:24,609 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5345b80b290e2620248a8fde2595e371/C of 5345b80b290e2620248a8fde2595e371 into 9a85b70a86c0441e8a7b28bcc7991af2(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
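[Editorial note] The PressureAwareThroughputController line above reports an average throughput of 6.55 MB/second against a 50.00 MB/second total limit with zero sleep time. Below is a hedged arithmetic sketch of how such an average and a required sleep could be derived; the byte count is taken from the log, the elapsed time is an assumption (it is not logged explicitly), and the formula is a simplification rather than the controller's actual code.

public class ThroughputSketch {
    public static void main(String[] args) {
        // Figures modeled on the log: the C-store compaction wrote roughly
        // 13 KB of output in a few milliseconds, well under the limit.
        double bytesWritten = 13_255;          // size of 9a85b70a86c0441e8a7b28bcc7991af2
        double elapsedMs = 2.0;                // illustrative duration, not logged explicitly
        double limitBytesPerSec = 50.0 * 1024 * 1024;

        double avgBytesPerSec = bytesWritten / (elapsedMs / 1000.0);
        // Sleep only if the observed rate exceeds the limit; for a compaction
        // this small it never does, hence "slept 0 time(s)" in the log.
        double sleepMs = Math.max(0, bytesWritten / limitBytesPerSec * 1000.0 - elapsedMs);

        System.out.printf("avg = %.2f MB/s, sleep = %.2f ms%n",
            avgBytesPerSec / (1024 * 1024), sleepMs);
    }
}
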
2024-12-04T15:24:24,609 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:24,609 INFO [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371., storeName=5345b80b290e2620248a8fde2595e371/C, priority=12, startTime=1733325864221; duration=0sec 2024-12-04T15:24:24,609 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T15:24:24,609 DEBUG [RS:0;645c2dbfef2e:42169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5345b80b290e2620248a8fde2595e371:C 2024-12-04T15:24:24,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742572_1748 (size=12301) 2024-12-04T15:24:24,624 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/ea65dfdb9f7f41378b512565781f8748 2024-12-04T15:24:24,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/2551c3ab47f14c088a2c6a60af1331b2 is 50, key is test_row_0/C:col10/1733325863166/Put/seqid=0 2024-12-04T15:24:24,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742573_1749 (size=12301) 2024-12-04T15:24:24,688 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/2551c3ab47f14c088a2c6a60af1331b2 2024-12-04T15:24:24,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:24,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40358 deadline: 1733325924685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:24,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40330 deadline: 1733325924688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:24,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40320 deadline: 1733325924688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:24,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40354 deadline: 1733325924696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-04T15:24:24,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42169 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1733325924696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,700 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/6210541a7d4b496699bab91168e15ee2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/6210541a7d4b496699bab91168e15ee2 2024-12-04T15:24:24,704 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-04T15:24:24,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:24,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. as already flushing 2024-12-04T15:24:24,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
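[Editorial note] Each RegionTooBusyException warning above comes from HRegion.checkResources rejecting a put because the region's memstore is over the 512.0 K blocking limit while flushes catch up (this test runs with a very small limit). Below is a simplified, hypothetical sketch of that kind of back-pressure check; it is not the real HRegion code, and only the limit value is taken from the log.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

public class MemStoreBackPressureSketch {
    static final long BLOCKING_LIMIT = 512 * 1024;   // "Over memstore limit=512.0 K"
    static final AtomicLong memStoreSize = new AtomicLong();

    // Local stand-in for org.apache.hadoop.hbase.RegionTooBusyException.
    static class RegionTooBusyException extends IOException {
        RegionTooBusyException(String msg) { super(msg); }
    }

    // Hypothetical equivalent of checkResources(): refuse the write and ask
    // the caller to back off and retry once a flush has drained the memstore.
    static void checkResources() throws RegionTooBusyException {
        if (memStoreSize.get() > BLOCKING_LIMIT) {
            throw new RegionTooBusyException(
                "Over memstore limit=" + BLOCKING_LIMIT + " bytes");
        }
    }

    static void put(int bytes) throws RegionTooBusyException {
        checkResources();
        memStoreSize.addAndGet(bytes);
    }

    public static void main(String[] args) {
        try {
            memStoreSize.set(600 * 1024);   // pretend flushes are behind
            put(4_700);                     // a ~4.7 K mutation, as in the log
        } catch (RegionTooBusyException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
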
2024-12-04T15:24:24,704 ERROR [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:24,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:24,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:24:24,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/6210541a7d4b496699bab91168e15ee2, entries=200, sequenceid=433, filesize=39.0 K 2024-12-04T15:24:24,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/ea65dfdb9f7f41378b512565781f8748 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/ea65dfdb9f7f41378b512565781f8748 2024-12-04T15:24:24,724 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/ea65dfdb9f7f41378b512565781f8748, entries=150, sequenceid=433, filesize=12.0 K 2024-12-04T15:24:24,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/2551c3ab47f14c088a2c6a60af1331b2 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/2551c3ab47f14c088a2c6a60af1331b2 2024-12-04T15:24:24,728 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/2551c3ab47f14c088a2c6a60af1331b2, entries=150, sequenceid=433, filesize=12.0 K 
2024-12-04T15:24:24,729 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 5345b80b290e2620248a8fde2595e371 in 386ms, sequenceid=433, compaction requested=false 2024-12-04T15:24:24,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:24,788 DEBUG [Thread-2766 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x663aa62c to 127.0.0.1:55739 2024-12-04T15:24:24,788 DEBUG [Thread-2770 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x531e99d9 to 127.0.0.1:55739 2024-12-04T15:24:24,788 DEBUG [Thread-2774 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2855176e to 127.0.0.1:55739 2024-12-04T15:24:24,788 DEBUG [Thread-2766 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:24:24,788 DEBUG [Thread-2770 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:24:24,788 DEBUG [Thread-2774 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:24:24,791 DEBUG [Thread-2768 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0c6369bd to 127.0.0.1:55739 2024-12-04T15:24:24,791 DEBUG [Thread-2768 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:24:24,792 DEBUG [Thread-2772 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x192bd4a8 to 127.0.0.1:55739 2024-12-04T15:24:24,792 DEBUG [Thread-2772 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:24:24,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-04T15:24:24,870 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:24,871 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-04T15:24:24,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
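[Editorial note] The retried flush that begins just below snapshots all three column-family memstores (A, B, C), writes each snapshot to its own tmp HFile, and only then commits them, which is why the log interleaves "FLUSHING TO DISK ... store=A/B/C" with separate tmp-file writes and commits. Below is a compact, hypothetical sketch of that per-family loop; the per-family byte split is a rough assumption derived from the ~80.51 KB total reported for the pid=168 flush.

import java.util.LinkedHashMap;
import java.util.Map;

public class PerFamilyFlushSketch {
    public static void main(String[] args) {
        // Bytes assumed to sit in each family's memstore, roughly matching
        // the 82440-byte total reported for the pid=168 flush.
        Map<String, Long> memstores = new LinkedHashMap<>();
        memstores.put("A", 27_480L);
        memstores.put("B", 27_480L);
        memstores.put("C", 27_480L);

        long sequenceId = 451;   // the flush's sequenceid in the log
        for (Map.Entry<String, Long> e : memstores.entrySet()) {
            // 1. snapshot the family's active segment so writers can continue
            // 2. write the snapshot to .tmp/<family>/<file>
            // 3. remember the tmp file; commit all families together afterwards
            System.out.printf("flushed %d bytes of store %s at seqid %d%n",
                e.getValue(), e.getKey(), sequenceId);
        }
    }
}
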
2024-12-04T15:24:24,871 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-04T15:24:24,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:24,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:24,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:24,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:24,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:24,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:24,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120429c81e69d3e24dc0953c35616a83cc57_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325864367/Put/seqid=0 2024-12-04T15:24:24,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742574_1750 (size=12454) 2024-12-04T15:24:24,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:24,898 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120429c81e69d3e24dc0953c35616a83cc57_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120429c81e69d3e24dc0953c35616a83cc57_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:24,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/fd374a71e7084aeebdae4b078f326ebb, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:24,899 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/fd374a71e7084aeebdae4b078f326ebb is 175, key is test_row_0/A:col10/1733325864367/Put/seqid=0 2024-12-04T15:24:24,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742575_1751 (size=31255) 2024-12-04T15:24:24,910 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=451, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/fd374a71e7084aeebdae4b078f326ebb 2024-12-04T15:24:24,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/3a0e24047d6c4cb095a0a515b6db4342 is 50, key is test_row_0/B:col10/1733325864367/Put/seqid=0 2024-12-04T15:24:24,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742576_1752 (size=12301) 2024-12-04T15:24:24,928 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/3a0e24047d6c4cb095a0a515b6db4342 2024-12-04T15:24:24,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/aaa984d6b3bb4e9abe60a8f5d2c91968 is 50, key is test_row_0/C:col10/1733325864367/Put/seqid=0 2024-12-04T15:24:24,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742577_1753 (size=12301) 2024-12-04T15:24:24,945 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/aaa984d6b3bb4e9abe60a8f5d2c91968 2024-12-04T15:24:24,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/fd374a71e7084aeebdae4b078f326ebb as 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/fd374a71e7084aeebdae4b078f326ebb 2024-12-04T15:24:24,951 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/fd374a71e7084aeebdae4b078f326ebb, entries=150, sequenceid=451, filesize=30.5 K 2024-12-04T15:24:24,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/3a0e24047d6c4cb095a0a515b6db4342 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/3a0e24047d6c4cb095a0a515b6db4342 2024-12-04T15:24:24,955 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/3a0e24047d6c4cb095a0a515b6db4342, entries=150, sequenceid=451, filesize=12.0 K 2024-12-04T15:24:24,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/aaa984d6b3bb4e9abe60a8f5d2c91968 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/aaa984d6b3bb4e9abe60a8f5d2c91968 2024-12-04T15:24:24,958 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/aaa984d6b3bb4e9abe60a8f5d2c91968, entries=150, sequenceid=451, filesize=12.0 K 2024-12-04T15:24:24,959 INFO [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=0 B/0 for 5345b80b290e2620248a8fde2595e371 in 88ms, sequenceid=451, compaction requested=true 2024-12-04T15:24:24,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:24,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
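[Editorial note] The flush above leaves store A with several HFiles again, and the region reports "compaction requested=true". Below is a sketch of the simple file-count check that typically drives that decision; the threshold value is an assumption (the usual hbase.hstore.compactionThreshold default of 3) and is not read from this test's configuration, and the sizes are approximations taken from the log.

import java.util.List;

public class CompactionRequestSketch {
    // Assumed threshold: request a minor compaction once a store holds at
    // least this many HFiles.
    static final int COMPACTION_THRESHOLD = 3;

    static boolean compactionRequested(List<Long> storeFileSizes) {
        return storeFileSizes.size() >= COMPACTION_THRESHOLD;
    }

    public static void main(String[] args) {
        // Store A after the flush: the 31.5 K compacted file plus the two
        // freshly flushed files noted in the log (sizes are approximate).
        List<Long> storeA = List.of(32_209L, 39_949L, 31_255L);
        System.out.println("compaction requested=" + compactionRequested(storeA));
    }
}
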
2024-12-04T15:24:24,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/645c2dbfef2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-04T15:24:24,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-04T15:24:24,962 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-04T15:24:24,962 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2540 sec 2024-12-04T15:24:24,963 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 1.2650 sec 2024-12-04T15:24:24,994 DEBUG [Thread-2759 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6b3a2c2f to 127.0.0.1:55739 2024-12-04T15:24:24,994 DEBUG [Thread-2759 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:24:24,996 DEBUG [Thread-2763 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x096b11d1 to 127.0.0.1:55739 2024-12-04T15:24:24,996 DEBUG [Thread-2763 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:24:24,997 DEBUG [Thread-2757 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04cd1462 to 127.0.0.1:55739 2024-12-04T15:24:24,997 DEBUG [Thread-2757 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:24:25,001 DEBUG [Thread-2761 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x211bcc55 to 127.0.0.1:55739 2024-12-04T15:24:25,001 DEBUG [Thread-2761 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:24:25,001 DEBUG [Thread-2755 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x401484c4 to 127.0.0.1:55739 2024-12-04T15:24:25,001 DEBUG [Thread-2755 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:24:25,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-04T15:24:25,817 INFO [Thread-2765 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-12-04T15:24:25,817 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-12-04T15:24:25,818 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 16
2024-12-04T15:24:25,818 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 20
2024-12-04T15:24:25,818 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 125
2024-12-04T15:24:25,818 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 95
2024-12-04T15:24:25,818 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 103
2024-12-04T15:24:25,818 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-04T15:24:25,818 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3394
2024-12-04T15:24:25,818 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3308
2024-12-04T15:24:25,818 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3200
2024-12-04T15:24:25,818 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3430
2024-12-04T15:24:25,818 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3344
2024-12-04T15:24:25,818 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-04T15:24:25,818 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-04T15:24:25,818 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53e238d3 to 127.0.0.1:55739
2024-12-04T15:24:25,818 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-04T15:24:25,819 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-04T15:24:25,819 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-04T15:24:25,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-04T15:24:25,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169
2024-12-04T15:24:25,822 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325865822"}]},"ts":"1733325865822"}
2024-12-04T15:24:25,823 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-04T15:24:25,826 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-04T15:24:25,826 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-04T15:24:25,827 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5345b80b290e2620248a8fde2595e371, UNASSIGN}]
2024-12-04T15:24:25,828 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5345b80b290e2620248a8fde2595e371, UNASSIGN
2024-12-04T15:24:25,830 INFO [PEWorker-4
{}] assignment.RegionStateStore(202): pid=171 updating hbase:meta row=5345b80b290e2620248a8fde2595e371, regionState=CLOSING, regionLocation=645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:25,831 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-04T15:24:25,831 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; CloseRegionProcedure 5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856}] 2024-12-04T15:24:25,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-04T15:24:25,982 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:25,983 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(124): Close 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:25,983 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-04T15:24:25,983 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1681): Closing 5345b80b290e2620248a8fde2595e371, disabling compactions & flushes 2024-12-04T15:24:25,983 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:25,983 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 2024-12-04T15:24:25,983 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. after waiting 0 ms 2024-12-04T15:24:25,983 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
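The records above show the client call ("Started disable of TestAcidGuarantees") turning into DisableTableProcedure pid=169 on the master and its CloseRegionProcedure children. For orientation only, a minimal client-side sketch of issuing the same call through the public Admin API; the configuration lookup and class name are illustrative, not the test tool's own code.

    // Minimal sketch (not AcidGuaranteesTestTool's code): disable a table via the Admin API,
    // which is what produces the "DisableTableProcedure table=TestAcidGuarantees" records above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          if (!admin.isTableDisabled(table)) {
            // Blocks until the master's disable procedure (pid=169 above) completes; the client
            // polls the master meanwhile, which is what the repeated
            // "Checking to see if procedure is done pid=169" records correspond to.
            admin.disableTable(table);
          }
        }
      }
    }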
2024-12-04T15:24:25,983 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(2837): Flushing 5345b80b290e2620248a8fde2595e371 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-04T15:24:25,983 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=A 2024-12-04T15:24:25,983 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:25,983 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=B 2024-12-04T15:24:25,983 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:25,983 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5345b80b290e2620248a8fde2595e371, store=C 2024-12-04T15:24:25,983 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-04T15:24:25,989 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412047e492355f7e041ab853b6fce32f1580f_5345b80b290e2620248a8fde2595e371 is 50, key is test_row_0/A:col10/1733325865000/Put/seqid=0 2024-12-04T15:24:25,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742578_1754 (size=12454) 2024-12-04T15:24:26,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-04T15:24:26,397 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:24:26,401 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412047e492355f7e041ab853b6fce32f1580f_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412047e492355f7e041ab853b6fce32f1580f_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:26,402 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/850161d944b54abc832025b83419e332, store: [table=TestAcidGuarantees family=A region=5345b80b290e2620248a8fde2595e371] 2024-12-04T15:24:26,403 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/850161d944b54abc832025b83419e332 is 175, key is test_row_0/A:col10/1733325865000/Put/seqid=0 2024-12-04T15:24:26,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742579_1755 (size=31255) 2024-12-04T15:24:26,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-04T15:24:26,808 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=459, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/850161d944b54abc832025b83419e332 2024-12-04T15:24:26,813 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/9540f0b139494f44bba33c8dd158dd1f is 50, key is test_row_0/B:col10/1733325865000/Put/seqid=0 2024-12-04T15:24:26,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742580_1756 (size=12301) 2024-12-04T15:24:26,817 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=459 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/9540f0b139494f44bba33c8dd158dd1f 2024-12-04T15:24:26,821 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/1c05143d4f864cc2b950bc9372b75c3e is 50, key is test_row_0/C:col10/1733325865000/Put/seqid=0 2024-12-04T15:24:26,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742581_1757 (size=12301) 2024-12-04T15:24:26,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-04T15:24:27,225 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=459 (bloomFilter=true), 
to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/1c05143d4f864cc2b950bc9372b75c3e 2024-12-04T15:24:27,229 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/A/850161d944b54abc832025b83419e332 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/850161d944b54abc832025b83419e332 2024-12-04T15:24:27,233 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/850161d944b54abc832025b83419e332, entries=150, sequenceid=459, filesize=30.5 K 2024-12-04T15:24:27,233 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/B/9540f0b139494f44bba33c8dd158dd1f as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9540f0b139494f44bba33c8dd158dd1f 2024-12-04T15:24:27,237 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9540f0b139494f44bba33c8dd158dd1f, entries=150, sequenceid=459, filesize=12.0 K 2024-12-04T15:24:27,237 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/.tmp/C/1c05143d4f864cc2b950bc9372b75c3e as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/1c05143d4f864cc2b950bc9372b75c3e 2024-12-04T15:24:27,241 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/1c05143d4f864cc2b950bc9372b75c3e, entries=150, sequenceid=459, filesize=12.0 K 2024-12-04T15:24:27,242 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 5345b80b290e2620248a8fde2595e371 in 1258ms, sequenceid=459, compaction requested=true 2024-12-04T15:24:27,242 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/ea0fbf495d4e4de3bc14c8bb2e2d0490, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/6565a561d65c41e7a598ea5b808592ad, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/98223fe4747b4eebaf5e615fac66832a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/0fc6c959dde7415c8f8ec11921b1ebd4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/3c83100b07f44b0f8489434b07b779ba, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/2598bd88ef154b49b8b86c9fca024a0a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e1ecec2448854ff9acb4e9f6fa324fc2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/bf1643f0fb284718a252d46dc8dc0e1a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e2a676a00df94308b8331ff7ca99800f, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/ec223e494cb94fb3a27876ba242a5430, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/9981e058f1b04e968b2dcafcfb5277fc, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/fd6417d08f3b4f72b1777f1d68620080, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e5a00f56f2274ef7b6b8a3649adebe88, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/60977262f11243eb8133ab1278a62c1e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/f83d5a59a0fd4479851238d9d8b71db7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/976a51e368a94fbda218de488252d2be, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/186e46218e814fdf8446eab8559dfbef, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/5f2637c6132a45a4b49727d921e4bfe5, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/410f09271e894e5a86a8aa859d87bf71, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/3f956f958fed48f59494a1ff0a4ed059, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/07900a34941644e2911536d774d9bdd2, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/543ac59396f44108bd9890e2984ce76a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/b93d07ebaf544b6f8e9edad76af7dd42, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/bcfaf2d617d54dc8ba5bedb2126db50d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/43653734a2014222884f3d4d40f91531, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/8aa39b16ac774ee8bb5203f21e04ed93, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/0fda484090774713bcecf104c8672b14, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/29acdd61886b466387467b9a0e9baee6] to archive 2024-12-04T15:24:27,244 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
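The flush above reports its sizes both in KB and in raw bytes; as a quick consistency check of those already-logged numbers (no new measurements, only arithmetic on the values in the records):

    34350 bytes / 1024       = 33.54 KB  total flushed data size ("dataSize ~33.54 KB/34350")
    34350 bytes / 3 families = 11450 bytes = 11.18 KB per store, matching the per-store
                               "Flushed memstore data size=11.18 KB" lines; the MOB flusher
                               reports the same amount for family A rounded to "memsize=11.2 K"
    90720 bytes / 1024       = 88.59 KB  heap size reported at flush completion ("heapSize ~88.59 KB/90720")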
2024-12-04T15:24:27,246 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/ea0fbf495d4e4de3bc14c8bb2e2d0490 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/ea0fbf495d4e4de3bc14c8bb2e2d0490 2024-12-04T15:24:27,247 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/6565a561d65c41e7a598ea5b808592ad to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/6565a561d65c41e7a598ea5b808592ad 2024-12-04T15:24:27,248 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/98223fe4747b4eebaf5e615fac66832a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/98223fe4747b4eebaf5e615fac66832a 2024-12-04T15:24:27,249 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/0fc6c959dde7415c8f8ec11921b1ebd4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/0fc6c959dde7415c8f8ec11921b1ebd4 2024-12-04T15:24:27,251 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/3c83100b07f44b0f8489434b07b779ba to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/3c83100b07f44b0f8489434b07b779ba 2024-12-04T15:24:27,252 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/2598bd88ef154b49b8b86c9fca024a0a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/2598bd88ef154b49b8b86c9fca024a0a 2024-12-04T15:24:27,253 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e1ecec2448854ff9acb4e9f6fa324fc2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e1ecec2448854ff9acb4e9f6fa324fc2 2024-12-04T15:24:27,254 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/bf1643f0fb284718a252d46dc8dc0e1a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/bf1643f0fb284718a252d46dc8dc0e1a 2024-12-04T15:24:27,256 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e2a676a00df94308b8331ff7ca99800f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e2a676a00df94308b8331ff7ca99800f 2024-12-04T15:24:27,257 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/ec223e494cb94fb3a27876ba242a5430 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/ec223e494cb94fb3a27876ba242a5430 2024-12-04T15:24:27,258 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/9981e058f1b04e968b2dcafcfb5277fc to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/9981e058f1b04e968b2dcafcfb5277fc 2024-12-04T15:24:27,260 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/fd6417d08f3b4f72b1777f1d68620080 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/fd6417d08f3b4f72b1777f1d68620080 2024-12-04T15:24:27,261 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e5a00f56f2274ef7b6b8a3649adebe88 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/e5a00f56f2274ef7b6b8a3649adebe88 2024-12-04T15:24:27,262 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/60977262f11243eb8133ab1278a62c1e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/60977262f11243eb8133ab1278a62c1e 2024-12-04T15:24:27,263 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/f83d5a59a0fd4479851238d9d8b71db7 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/f83d5a59a0fd4479851238d9d8b71db7 2024-12-04T15:24:27,265 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/976a51e368a94fbda218de488252d2be to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/976a51e368a94fbda218de488252d2be 2024-12-04T15:24:27,266 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/186e46218e814fdf8446eab8559dfbef to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/186e46218e814fdf8446eab8559dfbef 2024-12-04T15:24:27,267 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/5f2637c6132a45a4b49727d921e4bfe5 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/5f2637c6132a45a4b49727d921e4bfe5 2024-12-04T15:24:27,268 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/410f09271e894e5a86a8aa859d87bf71 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/410f09271e894e5a86a8aa859d87bf71 2024-12-04T15:24:27,269 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/3f956f958fed48f59494a1ff0a4ed059 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/3f956f958fed48f59494a1ff0a4ed059 2024-12-04T15:24:27,271 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/07900a34941644e2911536d774d9bdd2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/07900a34941644e2911536d774d9bdd2 2024-12-04T15:24:27,272 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/543ac59396f44108bd9890e2984ce76a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/543ac59396f44108bd9890e2984ce76a 2024-12-04T15:24:27,273 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/b93d07ebaf544b6f8e9edad76af7dd42 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/b93d07ebaf544b6f8e9edad76af7dd42 2024-12-04T15:24:27,274 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/bcfaf2d617d54dc8ba5bedb2126db50d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/bcfaf2d617d54dc8ba5bedb2126db50d 2024-12-04T15:24:27,276 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/43653734a2014222884f3d4d40f91531 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/43653734a2014222884f3d4d40f91531 2024-12-04T15:24:27,277 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/8aa39b16ac774ee8bb5203f21e04ed93 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/8aa39b16ac774ee8bb5203f21e04ed93 2024-12-04T15:24:27,278 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/0fda484090774713bcecf104c8672b14 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/0fda484090774713bcecf104c8672b14 2024-12-04T15:24:27,279 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/29acdd61886b466387467b9a0e9baee6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/29acdd61886b466387467b9a0e9baee6 2024-12-04T15:24:27,281 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/1ed521b7cc534f6ba02405c688cd423d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/e0ee9826896c47b3ac679e55358530f4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9e0b93f11feb4ff6a6ae35c6b4177786, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/73103e0d0230457d9feebe81df4f7fa6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9246ae95f3334fd1abdbedd6a9c12aa0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/eb54a477650d451bb8449f0ef5c42cd6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/96c40afe942b461ab33313754de63cd1, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/1d23a6152afd4c9ea17b3332e996c0b6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/2d0cbc6eb36448118ceebf72a26ef622, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0d3fd99219a34253a759d7fa64316faa, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/cf27f4265656466cb9080f9f666c01d0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/964ce5f8d93a42e8bd966722267d67dc, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/eb6abe3cf6004bcba1f1376ca42eb26e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0ec81ff53ec04e6aab15c987ebd5b711, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/f930758bfb194482bb9356316a7de0c4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9c590a1f54f8469389e475a007a68502, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/15f0bd05e51b42e2a28e430e2705d476, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0f9f56f16221463a906491ffce1e32b9, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/dfc794dc0d084a7e8d5b75ea1c169b7c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/ed11bdbdf64e4160970d26505eb78799, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/8a1c98a88e644d1c82d44989c8310bc4, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/b35ca73473e84ea099dd4991d4e20503, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/d2f537b3c41e466280a9bde64baaef37, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/1f7c750f5c044ed4947c4ff454b92986, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/176a35d3f144485488536f4c2c50115b, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0369d2b1355d4a6ea82d2b20abe05694, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0ca6c24809134fdbb4a597b2bf3ff367, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/bd58f0c637a447268cd641fe6bdbec7c] to archive 2024-12-04T15:24:27,281 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
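Each "Archived from FileableStoreFile" record above moves one store file from the region's data directory into the parallel archive/ tree. A minimal sketch of inspecting the result over HDFS follows; only the path is taken from the log, the listing program itself is illustrative and assumes direct access to the same mini-cluster NameNode.

    // Sketch: list the store files the StoreCloser has archived for family A of this region.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchivedStoreFiles {
      public static void main(String[] args) throws Exception {
        Path archivedFamilyA = new Path(
            "hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c"
            + "/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A");
        Configuration conf = new Configuration();
        FileSystem fs = archivedFamilyA.getFileSystem(conf);
        for (FileStatus status : fs.listStatus(archivedFamilyA)) {
          // Each entry should be one of the HFiles named in the "Moving the files" record above.
          System.out.printf("%s\t%d bytes%n", status.getPath().getName(), status.getLen());
        }
      }
    }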
2024-12-04T15:24:27,283 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/1ed521b7cc534f6ba02405c688cd423d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/1ed521b7cc534f6ba02405c688cd423d 2024-12-04T15:24:27,284 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/e0ee9826896c47b3ac679e55358530f4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/e0ee9826896c47b3ac679e55358530f4 2024-12-04T15:24:27,286 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9e0b93f11feb4ff6a6ae35c6b4177786 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9e0b93f11feb4ff6a6ae35c6b4177786 2024-12-04T15:24:27,287 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/73103e0d0230457d9feebe81df4f7fa6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/73103e0d0230457d9feebe81df4f7fa6 2024-12-04T15:24:27,288 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9246ae95f3334fd1abdbedd6a9c12aa0 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9246ae95f3334fd1abdbedd6a9c12aa0 2024-12-04T15:24:27,289 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/eb54a477650d451bb8449f0ef5c42cd6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/eb54a477650d451bb8449f0ef5c42cd6 2024-12-04T15:24:27,290 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/96c40afe942b461ab33313754de63cd1 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/96c40afe942b461ab33313754de63cd1 2024-12-04T15:24:27,292 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/1d23a6152afd4c9ea17b3332e996c0b6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/1d23a6152afd4c9ea17b3332e996c0b6 2024-12-04T15:24:27,293 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/2d0cbc6eb36448118ceebf72a26ef622 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/2d0cbc6eb36448118ceebf72a26ef622 2024-12-04T15:24:27,294 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0d3fd99219a34253a759d7fa64316faa to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0d3fd99219a34253a759d7fa64316faa 2024-12-04T15:24:27,295 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/cf27f4265656466cb9080f9f666c01d0 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/cf27f4265656466cb9080f9f666c01d0 2024-12-04T15:24:27,296 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/964ce5f8d93a42e8bd966722267d67dc to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/964ce5f8d93a42e8bd966722267d67dc 2024-12-04T15:24:27,297 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/eb6abe3cf6004bcba1f1376ca42eb26e to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/eb6abe3cf6004bcba1f1376ca42eb26e 2024-12-04T15:24:27,299 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0ec81ff53ec04e6aab15c987ebd5b711 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0ec81ff53ec04e6aab15c987ebd5b711 2024-12-04T15:24:27,300 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/f930758bfb194482bb9356316a7de0c4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/f930758bfb194482bb9356316a7de0c4 2024-12-04T15:24:27,301 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9c590a1f54f8469389e475a007a68502 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9c590a1f54f8469389e475a007a68502 2024-12-04T15:24:27,303 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/15f0bd05e51b42e2a28e430e2705d476 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/15f0bd05e51b42e2a28e430e2705d476 2024-12-04T15:24:27,304 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0f9f56f16221463a906491ffce1e32b9 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0f9f56f16221463a906491ffce1e32b9 2024-12-04T15:24:27,305 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/dfc794dc0d084a7e8d5b75ea1c169b7c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/dfc794dc0d084a7e8d5b75ea1c169b7c 2024-12-04T15:24:27,307 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/ed11bdbdf64e4160970d26505eb78799 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/ed11bdbdf64e4160970d26505eb78799 2024-12-04T15:24:27,311 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/8a1c98a88e644d1c82d44989c8310bc4 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/8a1c98a88e644d1c82d44989c8310bc4 2024-12-04T15:24:27,323 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/b35ca73473e84ea099dd4991d4e20503 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/b35ca73473e84ea099dd4991d4e20503 2024-12-04T15:24:27,324 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/d2f537b3c41e466280a9bde64baaef37 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/d2f537b3c41e466280a9bde64baaef37 2024-12-04T15:24:27,328 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/1f7c750f5c044ed4947c4ff454b92986 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/1f7c750f5c044ed4947c4ff454b92986 2024-12-04T15:24:27,329 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/176a35d3f144485488536f4c2c50115b to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/176a35d3f144485488536f4c2c50115b 2024-12-04T15:24:27,330 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0369d2b1355d4a6ea82d2b20abe05694 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0369d2b1355d4a6ea82d2b20abe05694 2024-12-04T15:24:27,331 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0ca6c24809134fdbb4a597b2bf3ff367 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/0ca6c24809134fdbb4a597b2bf3ff367 2024-12-04T15:24:27,332 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/bd58f0c637a447268cd641fe6bdbec7c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/bd58f0c637a447268cd641fe6bdbec7c 2024-12-04T15:24:27,334 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/34149d29674e4bd29ae70c3184a62522, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/4e04687bd5284ad494533c9e803ae182, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/20afed4e64104bc9a278501b21ebb97d, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/40c4045f081542b5aeeef7de46b8afed, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/293ae11f8eef447d9ac9e5438d7ea94e, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/3885a155af014b448b516d5ba9e842f6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/806753efdd574085bc823d27f92d7e72, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/b96687febea94d28bfaf334528b1eeee, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/464c1fa18efe4e6294fa6f3b78b6117a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/8e981be9d1e14bb3b14aaed8771624e5, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/876dd432bf884fa18931cb994c511468, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/74dc3fced93245d2897f11a12f85ce0c, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/62b3aa7d12bf4d699058646a3972a117, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/4e6a7961156a45a7a081a39121cc6e02, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/72b77bc8dd034980b2cfd0b41d0d405a, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/365726a1884c4cfaa2030ad7e2cbbef0, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/e178d8169cb648bda8b3fd47093eae45, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/1ced1b23f66e467a8ef969343ec0b4b6, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/6831244852d54d138dabad2e31faf3e8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/7f226f71c159433a9543e80f6c7ed102, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/419a39a8b2044c0b828fafec533535e8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/55d4a7c5e6024a91ad6ccd039f7f39d8, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/981b0aac13794fc8aa958e3204f10dda, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/fe7bde190ac24cbd8f248bee5a393324, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/273922188d78448d8af93f342e536f60, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/1e146893d79d43318b0d40a7c0046ead, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/17f4ff5fb6cd42c0b7fedc8c4cb0f7b7, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/db997a85cf5048dfbfd70f3c67d1404e] to archive 2024-12-04T15:24:27,334 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
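Family A of this table is MOB-backed, which is why its flush earlier in this close sequence went through DefaultMobStoreFlusher and left a file under mobdir/ before the regular store file was committed. For orientation only, a sketch of how a MOB-enabled column family is declared through the public API; the table name and the threshold value are assumed example values, not how the test set up its table.

    // Sketch only: declare column family "A" as MOB-enabled so values above the threshold are
    // written to MOB files (the mobdir/ paths seen above) rather than ordinary store files.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("MobExample"))          // illustrative table name
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("A"))
                  .setMobEnabled(true)                               // route large values to MOB files
                  .setMobThreshold(10 * 1024)                        // assumed example: 10 KB
                  .build())
              .build());
        }
      }
    }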
2024-12-04T15:24:27,335 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/34149d29674e4bd29ae70c3184a62522 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/34149d29674e4bd29ae70c3184a62522 2024-12-04T15:24:27,336 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/4e04687bd5284ad494533c9e803ae182 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/4e04687bd5284ad494533c9e803ae182 2024-12-04T15:24:27,337 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/20afed4e64104bc9a278501b21ebb97d to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/20afed4e64104bc9a278501b21ebb97d 2024-12-04T15:24:27,341 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/40c4045f081542b5aeeef7de46b8afed to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/40c4045f081542b5aeeef7de46b8afed 2024-12-04T15:24:27,342 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/293ae11f8eef447d9ac9e5438d7ea94e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/293ae11f8eef447d9ac9e5438d7ea94e 2024-12-04T15:24:27,343 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/3885a155af014b448b516d5ba9e842f6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/3885a155af014b448b516d5ba9e842f6 2024-12-04T15:24:27,344 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/806753efdd574085bc823d27f92d7e72 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/806753efdd574085bc823d27f92d7e72 2024-12-04T15:24:27,347 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/b96687febea94d28bfaf334528b1eeee to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/b96687febea94d28bfaf334528b1eeee 2024-12-04T15:24:27,348 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/464c1fa18efe4e6294fa6f3b78b6117a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/464c1fa18efe4e6294fa6f3b78b6117a 2024-12-04T15:24:27,349 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/8e981be9d1e14bb3b14aaed8771624e5 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/8e981be9d1e14bb3b14aaed8771624e5 2024-12-04T15:24:27,350 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/876dd432bf884fa18931cb994c511468 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/876dd432bf884fa18931cb994c511468 2024-12-04T15:24:27,351 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/74dc3fced93245d2897f11a12f85ce0c to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/74dc3fced93245d2897f11a12f85ce0c 2024-12-04T15:24:27,352 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/62b3aa7d12bf4d699058646a3972a117 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/62b3aa7d12bf4d699058646a3972a117 2024-12-04T15:24:27,353 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/4e6a7961156a45a7a081a39121cc6e02 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/4e6a7961156a45a7a081a39121cc6e02 2024-12-04T15:24:27,354 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/72b77bc8dd034980b2cfd0b41d0d405a to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/72b77bc8dd034980b2cfd0b41d0d405a 2024-12-04T15:24:27,355 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/365726a1884c4cfaa2030ad7e2cbbef0 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/365726a1884c4cfaa2030ad7e2cbbef0 2024-12-04T15:24:27,356 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/e178d8169cb648bda8b3fd47093eae45 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/e178d8169cb648bda8b3fd47093eae45 2024-12-04T15:24:27,357 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/1ced1b23f66e467a8ef969343ec0b4b6 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/1ced1b23f66e467a8ef969343ec0b4b6 2024-12-04T15:24:27,358 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/6831244852d54d138dabad2e31faf3e8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/6831244852d54d138dabad2e31faf3e8 2024-12-04T15:24:27,359 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/7f226f71c159433a9543e80f6c7ed102 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/7f226f71c159433a9543e80f6c7ed102 2024-12-04T15:24:27,360 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/419a39a8b2044c0b828fafec533535e8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/419a39a8b2044c0b828fafec533535e8 2024-12-04T15:24:27,361 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/55d4a7c5e6024a91ad6ccd039f7f39d8 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/55d4a7c5e6024a91ad6ccd039f7f39d8 2024-12-04T15:24:27,362 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/981b0aac13794fc8aa958e3204f10dda to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/981b0aac13794fc8aa958e3204f10dda 2024-12-04T15:24:27,363 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/fe7bde190ac24cbd8f248bee5a393324 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/fe7bde190ac24cbd8f248bee5a393324 2024-12-04T15:24:27,364 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/273922188d78448d8af93f342e536f60 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/273922188d78448d8af93f342e536f60 2024-12-04T15:24:27,365 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/1e146893d79d43318b0d40a7c0046ead to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/1e146893d79d43318b0d40a7c0046ead 2024-12-04T15:24:27,366 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/17f4ff5fb6cd42c0b7fedc8c4cb0f7b7 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/17f4ff5fb6cd42c0b7fedc8c4cb0f7b7 2024-12-04T15:24:27,367 DEBUG [StoreCloser-TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/db997a85cf5048dfbfd70f3c67d1404e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/db997a85cf5048dfbfd70f3c67d1404e 2024-12-04T15:24:27,370 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/recovered.edits/462.seqid, newMaxSeqId=462, maxSeqId=4 2024-12-04T15:24:27,371 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371. 
2024-12-04T15:24:27,371 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1635): Region close journal for 5345b80b290e2620248a8fde2595e371: 2024-12-04T15:24:27,372 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(170): Closed 5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,372 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=171 updating hbase:meta row=5345b80b290e2620248a8fde2595e371, regionState=CLOSED 2024-12-04T15:24:27,374 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-12-04T15:24:27,374 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; CloseRegionProcedure 5345b80b290e2620248a8fde2595e371, server=645c2dbfef2e,42169,1733325683856 in 1.5420 sec 2024-12-04T15:24:27,375 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=171, resume processing ppid=170 2024-12-04T15:24:27,375 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5345b80b290e2620248a8fde2595e371, UNASSIGN in 1.5470 sec 2024-12-04T15:24:27,377 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-12-04T15:24:27,377 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5500 sec 2024-12-04T15:24:27,377 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733325867377"}]},"ts":"1733325867377"} 2024-12-04T15:24:27,379 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-04T15:24:27,381 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-04T15:24:27,382 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5620 sec 2024-12-04T15:24:27,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-04T15:24:27,933 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-12-04T15:24:27,934 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-04T15:24:27,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:24:27,935 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=173, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:24:27,936 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=173, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:24:27,936 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-04T15:24:27,937 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,939 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C, FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/recovered.edits] 2024-12-04T15:24:27,942 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/08f2e692b24b4c13b2da0d2f4841abdd to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/08f2e692b24b4c13b2da0d2f4841abdd 2024-12-04T15:24:27,943 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/6210541a7d4b496699bab91168e15ee2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/6210541a7d4b496699bab91168e15ee2 2024-12-04T15:24:27,944 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/850161d944b54abc832025b83419e332 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/850161d944b54abc832025b83419e332 2024-12-04T15:24:27,945 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/fd374a71e7084aeebdae4b078f326ebb to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/A/fd374a71e7084aeebdae4b078f326ebb 2024-12-04T15:24:27,947 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/3a0e24047d6c4cb095a0a515b6db4342 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/3a0e24047d6c4cb095a0a515b6db4342 
2024-12-04T15:24:27,949 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/3b49e581b50b4521aff3bbe4d6ea4f43 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/3b49e581b50b4521aff3bbe4d6ea4f43 2024-12-04T15:24:27,951 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9540f0b139494f44bba33c8dd158dd1f to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/9540f0b139494f44bba33c8dd158dd1f 2024-12-04T15:24:27,953 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/ea65dfdb9f7f41378b512565781f8748 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/B/ea65dfdb9f7f41378b512565781f8748 2024-12-04T15:24:27,955 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/1c05143d4f864cc2b950bc9372b75c3e to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/1c05143d4f864cc2b950bc9372b75c3e 2024-12-04T15:24:27,959 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/2551c3ab47f14c088a2c6a60af1331b2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/2551c3ab47f14c088a2c6a60af1331b2 2024-12-04T15:24:27,960 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/9a85b70a86c0441e8a7b28bcc7991af2 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/9a85b70a86c0441e8a7b28bcc7991af2 2024-12-04T15:24:27,961 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/aaa984d6b3bb4e9abe60a8f5d2c91968 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/C/aaa984d6b3bb4e9abe60a8f5d2c91968 2024-12-04T15:24:27,964 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/recovered.edits/462.seqid to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371/recovered.edits/462.seqid 2024-12-04T15:24:27,965 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/default/TestAcidGuarantees/5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,965 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-04T15:24:27,965 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-04T15:24:27,966 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-04T15:24:27,969 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412041ab89625e3cb4562a7502cf5bf3eee25_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412041ab89625e3cb4562a7502cf5bf3eee25_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,970 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120429c81e69d3e24dc0953c35616a83cc57_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120429c81e69d3e24dc0953c35616a83cc57_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,972 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412042e842442b375492bb9dc185fd9d24069_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412042e842442b375492bb9dc185fd9d24069_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,974 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204342bcf4585634c9c925a72c6dfe787c4_5345b80b290e2620248a8fde2595e371 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204342bcf4585634c9c925a72c6dfe787c4_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,975 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204445bfd43f7bf404c837e15d3f81e19fa_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204445bfd43f7bf404c837e15d3f81e19fa_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,976 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120445d0306f5e9b43d3b8ca5560a9199077_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120445d0306f5e9b43d3b8ca5560a9199077_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,980 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120447da12f5290948e2a10af3ace8b21b62_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120447da12f5290948e2a10af3ace8b21b62_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,981 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412044a3ee12c4d744ff5895aace530d1f0ee_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412044a3ee12c4d744ff5895aace530d1f0ee_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,983 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412044c9e4817b5b343a7b7ffac364cb1a453_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412044c9e4817b5b343a7b7ffac364cb1a453_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,984 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204624cbb5f0cb24dc3861e6005cba8f2e0_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204624cbb5f0cb24dc3861e6005cba8f2e0_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,985 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412046e2145939a7b42a3ac486620374ad331_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412046e2145939a7b42a3ac486620374ad331_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,986 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204718c9c0a5d08414d8fc83082f36f7c98_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204718c9c0a5d08414d8fc83082f36f7c98_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,987 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412047e492355f7e041ab853b6fce32f1580f_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412047e492355f7e041ab853b6fce32f1580f_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,989 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412047ebb9d58a2e74097b17e4974714eaac6_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412047ebb9d58a2e74097b17e4974714eaac6_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,990 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412048451c064d88f4351843680e256758efb_5345b80b290e2620248a8fde2595e371 to 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412048451c064d88f4351843680e256758efb_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,991 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204b2321b8effd34d95b18ec6d314f2d5c2_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204b2321b8effd34d95b18ec6d314f2d5c2_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,993 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204c4ffba01bfd24e488978e49f4341e3f0_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204c4ffba01bfd24e488978e49f4341e3f0_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,994 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204dcb4f6e201e9437bafb12f7b875e1c2e_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204dcb4f6e201e9437bafb12f7b875e1c2e_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,996 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204dfef3907570c4a5c983b73b2232e4b94_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204dfef3907570c4a5c983b73b2232e4b94_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,997 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204ef8eaf9bb2f64191bab5a03a935e4084_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204ef8eaf9bb2f64191bab5a03a935e4084_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:27,998 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204f12d8dde348849638b40ef6ad451fa9e_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204f12d8dde348849638b40ef6ad451fa9e_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:28,000 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204f872128c7d184513b23323ee00795f4c_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204f872128c7d184513b23323ee00795f4c_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:28,001 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204fa61343e0307448ab59b599b389878fe_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204fa61343e0307448ab59b599b389878fe_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:28,002 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204fb5f2946349d4c47886e2914dcdc3469_5345b80b290e2620248a8fde2595e371 to hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241204fb5f2946349d4c47886e2914dcdc3469_5345b80b290e2620248a8fde2595e371 2024-12-04T15:24:28,003 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-04T15:24:28,006 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=173, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:24:28,008 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-04T15:24:28,014 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-04T15:24:28,015 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=173, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:24:28,015 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-12-04T15:24:28,015 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733325868015"}]},"ts":"9223372036854775807"} 2024-12-04T15:24:28,017 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-04T15:24:28,017 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 5345b80b290e2620248a8fde2595e371, NAME => 'TestAcidGuarantees,,1733325841600.5345b80b290e2620248a8fde2595e371.', STARTKEY => '', ENDKEY => ''}] 2024-12-04T15:24:28,017 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-04T15:24:28,018 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733325868017"}]},"ts":"9223372036854775807"} 2024-12-04T15:24:28,021 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-04T15:24:28,024 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=173, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-04T15:24:28,025 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 90 msec 2024-12-04T15:24:28,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33167 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-04T15:24:28,037 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-12-04T15:24:28,049 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=238 (was 237) - Thread LEAK? -, OpenFileDescriptor=453 (was 447) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=968 (was 975), ProcessCount=11 (was 11), AvailableMemoryMB=1740 (was 1917) 2024-12-04T15:24:28,049 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-04T15:24:28,050 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-04T15:24:28,050 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x731b646e to 127.0.0.1:55739 2024-12-04T15:24:28,050 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:24:28,050 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-04T15:24:28,050 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1111253708, stopped=false 2024-12-04T15:24:28,051 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=645c2dbfef2e,33167,1733325683129 2024-12-04T15:24:28,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T15:24:28,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T15:24:28,113 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-04T15:24:28,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:24:28,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:24:28,114 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:24:28,114 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:24:28,115 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:24:28,115 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '645c2dbfef2e,42169,1733325683856' ***** 2024-12-04T15:24:28,115 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-04T15:24:28,115 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T15:24:28,115 INFO [RS:0;645c2dbfef2e:42169 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T15:24:28,116 INFO [RS:0;645c2dbfef2e:42169 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-04T15:24:28,116 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-04T15:24:28,116 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(3579): Received CLOSE for 369c833ab6e0e1ae5f4d743d2988012a 2024-12-04T15:24:28,116 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(1224): stopping server 645c2dbfef2e,42169,1733325683856 2024-12-04T15:24:28,116 DEBUG [RS:0;645c2dbfef2e:42169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:24:28,116 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T15:24:28,116 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T15:24:28,116 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-04T15:24:28,116 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-04T15:24:28,117 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-04T15:24:28,117 DEBUG [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(1603): Online Regions={369c833ab6e0e1ae5f4d743d2988012a=hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a., 1588230740=hbase:meta,,1.1588230740} 2024-12-04T15:24:28,117 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 369c833ab6e0e1ae5f4d743d2988012a, disabling compactions & flushes 2024-12-04T15:24:28,117 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a. 2024-12-04T15:24:28,117 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a. 2024-12-04T15:24:28,117 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a. after waiting 0 ms 2024-12-04T15:24:28,117 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a. 
2024-12-04T15:24:28,117 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 369c833ab6e0e1ae5f4d743d2988012a 1/1 column families, dataSize=78 B heapSize=488 B
2024-12-04T15:24:28,117 DEBUG [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-04T15:24:28,117 INFO [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-04T15:24:28,117 DEBUG [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-04T15:24:28,117 DEBUG [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-04T15:24:28,117 DEBUG [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-04T15:24:28,117 INFO [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB
2024-12-04T15:24:28,118 INFO [regionserver/645c2dbfef2e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-04T15:24:28,121 DEBUG [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 369c833ab6e0e1ae5f4d743d2988012a
2024-12-04T15:24:28,141 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/namespace/369c833ab6e0e1ae5f4d743d2988012a/.tmp/info/87cea58819014393bcc65f07eb9d0e84 is 45, key is default/info:d/1733325688893/Put/seqid=0
2024-12-04T15:24:28,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742582_1758 (size=5037)
2024-12-04T15:24:28,152 DEBUG [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740/.tmp/info/33dcb3820c78483e906cd87a1c74a013 is 143, key is hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a./info:regioninfo/1733325688815/Put/seqid=0
2024-12-04T15:24:28,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742583_1759 (size=7725)
2024-12-04T15:24:28,321 DEBUG [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 369c833ab6e0e1ae5f4d743d2988012a
2024-12-04T15:24:28,521 DEBUG [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 369c833ab6e0e1ae5f4d743d2988012a
2024-12-04T15:24:28,545 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/namespace/369c833ab6e0e1ae5f4d743d2988012a/.tmp/info/87cea58819014393bcc65f07eb9d0e84
2024-12-04T15:24:28,550 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/namespace/369c833ab6e0e1ae5f4d743d2988012a/.tmp/info/87cea58819014393bcc65f07eb9d0e84 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/namespace/369c833ab6e0e1ae5f4d743d2988012a/info/87cea58819014393bcc65f07eb9d0e84
2024-12-04T15:24:28,554 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/namespace/369c833ab6e0e1ae5f4d743d2988012a/info/87cea58819014393bcc65f07eb9d0e84, entries=2, sequenceid=6, filesize=4.9 K
2024-12-04T15:24:28,555 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 369c833ab6e0e1ae5f4d743d2988012a in 438ms, sequenceid=6, compaction requested=false
2024-12-04T15:24:28,557 INFO [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740/.tmp/info/33dcb3820c78483e906cd87a1c74a013
2024-12-04T15:24:28,560 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/namespace/369c833ab6e0e1ae5f4d743d2988012a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-04T15:24:28,561 INFO [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a.
2024-12-04T15:24:28,561 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 369c833ab6e0e1ae5f4d743d2988012a:
2024-12-04T15:24:28,561 DEBUG [RS_CLOSE_REGION-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733325687937.369c833ab6e0e1ae5f4d743d2988012a.
2024-12-04T15:24:28,588 DEBUG [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740/.tmp/rep_barrier/5cbcb6ac25374d269c2466226c84d26c is 102, key is TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700./rep_barrier:/1733325713746/DeleteFamily/seqid=0
2024-12-04T15:24:28,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742584_1760 (size=6025)
2024-12-04T15:24:28,598 INFO [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740/.tmp/rep_barrier/5cbcb6ac25374d269c2466226c84d26c
2024-12-04T15:24:28,616 DEBUG [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740/.tmp/table/b8b3f746c61b4e1096942cea5a6ab709 is 96, key is TestAcidGuarantees,,1733325689118.e2e19d2bb9bfcadbc1f5e0b910706700./table:/1733325713746/DeleteFamily/seqid=0
2024-12-04T15:24:28,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742585_1761 (size=5942)
2024-12-04T15:24:28,634 INFO [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740/.tmp/table/b8b3f746c61b4e1096942cea5a6ab709
2024-12-04T15:24:28,638 DEBUG [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740/.tmp/info/33dcb3820c78483e906cd87a1c74a013 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740/info/33dcb3820c78483e906cd87a1c74a013
2024-12-04T15:24:28,642 INFO [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740/info/33dcb3820c78483e906cd87a1c74a013, entries=22, sequenceid=93, filesize=7.5 K
2024-12-04T15:24:28,643 DEBUG [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740/.tmp/rep_barrier/5cbcb6ac25374d269c2466226c84d26c as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740/rep_barrier/5cbcb6ac25374d269c2466226c84d26c
2024-12-04T15:24:28,647 INFO [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740/rep_barrier/5cbcb6ac25374d269c2466226c84d26c, entries=6, sequenceid=93, filesize=5.9 K
2024-12-04T15:24:28,647 DEBUG [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740/.tmp/table/b8b3f746c61b4e1096942cea5a6ab709 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740/table/b8b3f746c61b4e1096942cea5a6ab709
2024-12-04T15:24:28,651 INFO [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740/table/b8b3f746c61b4e1096942cea5a6ab709, entries=9, sequenceid=93, filesize=5.8 K
2024-12-04T15:24:28,651 INFO [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 534ms, sequenceid=93, compaction requested=false
2024-12-04T15:24:28,667 DEBUG [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1
2024-12-04T15:24:28,667 DEBUG [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-04T15:24:28,667 INFO [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-04T15:24:28,667 DEBUG [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-04T15:24:28,667 DEBUG [RS_CLOSE_META-regionserver/645c2dbfef2e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-04T15:24:28,676 INFO [regionserver/645c2dbfef2e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-04T15:24:28,676 INFO [regionserver/645c2dbfef2e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-04T15:24:28,721 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(1250): stopping server 645c2dbfef2e,42169,1733325683856; all regions closed.
2024-12-04T15:24:28,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741834_1010 (size=26050)
2024-12-04T15:24:28,728 DEBUG [RS:0;645c2dbfef2e:42169 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/oldWALs
2024-12-04T15:24:28,728 INFO [RS:0;645c2dbfef2e:42169 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 645c2dbfef2e%2C42169%2C1733325683856.meta:.meta(num 1733325687666)
2024-12-04T15:24:28,732 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(743): complete file /user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/WALs/645c2dbfef2e,42169,1733325683856/645c2dbfef2e%2C42169%2C1733325683856.1733325686782 not finished, retry = 0
2024-12-04T15:24:28,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741832_1008 (size=17668035)
2024-12-04T15:24:28,835 DEBUG [RS:0;645c2dbfef2e:42169 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/oldWALs
2024-12-04T15:24:28,835 INFO [RS:0;645c2dbfef2e:42169 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 645c2dbfef2e%2C42169%2C1733325683856:(num 1733325686782)
2024-12-04T15:24:28,835 DEBUG [RS:0;645c2dbfef2e:42169 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-04T15:24:28,835 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.LeaseManager(133): Closed leases
2024-12-04T15:24:28,836 INFO [RS:0;645c2dbfef2e:42169 {}] hbase.ChoreService(370): Chore service for: regionserver/645c2dbfef2e:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-04T15:24:28,836 INFO [regionserver/645c2dbfef2e:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-04T15:24:28,837 INFO [RS:0;645c2dbfef2e:42169 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42169
2024-12-04T15:24:28,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-04T15:24:28,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/645c2dbfef2e,42169,1733325683856
2024-12-04T15:24:29,051 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [645c2dbfef2e,42169,1733325683856]
2024-12-04T15:24:29,051 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 645c2dbfef2e,42169,1733325683856; numProcessing=1
2024-12-04T15:24:29,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-04T15:24:29,151 INFO [RS:0;645c2dbfef2e:42169 {}] regionserver.HRegionServer(1307): Exiting; stopping=645c2dbfef2e,42169,1733325683856; zookeeper connection closed.
2024-12-04T15:24:29,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42169-0x1005d9eaf690001, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-04T15:24:29,152 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@c12ca09 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@c12ca09
2024-12-04T15:24:29,152 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-04T15:24:29,159 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/645c2dbfef2e,42169,1733325683856 already deleted, retry=false
2024-12-04T15:24:29,159 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 645c2dbfef2e,42169,1733325683856 expired; onlineServers=0
2024-12-04T15:24:29,159 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '645c2dbfef2e,33167,1733325683129' *****
2024-12-04T15:24:29,159 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-04T15:24:29,159 DEBUG [M:0;645c2dbfef2e:33167 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22ad2c72, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=645c2dbfef2e/172.17.0.2:0
2024-12-04T15:24:29,159 INFO [M:0;645c2dbfef2e:33167 {}] regionserver.HRegionServer(1224): stopping server 645c2dbfef2e,33167,1733325683129
2024-12-04T15:24:29,159 INFO [M:0;645c2dbfef2e:33167 {}] regionserver.HRegionServer(1250): stopping server 645c2dbfef2e,33167,1733325683129; all regions closed.
2024-12-04T15:24:29,159 DEBUG [M:0;645c2dbfef2e:33167 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-04T15:24:29,160 DEBUG [M:0;645c2dbfef2e:33167 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-04T15:24:29,160 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-04T15:24:29,160 DEBUG [M:0;645c2dbfef2e:33167 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-04T15:24:29,160 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster-HFileCleaner.large.0-1733325686417 {}] cleaner.HFileCleaner(306): Exit Thread[master/645c2dbfef2e:0:becomeActiveMaster-HFileCleaner.large.0-1733325686417,5,FailOnTimeoutGroup]
2024-12-04T15:24:29,160 INFO [M:0;645c2dbfef2e:33167 {}] hbase.ChoreService(370): Chore service for: master/645c2dbfef2e:0 had [] on shutdown
2024-12-04T15:24:29,160 DEBUG [master/645c2dbfef2e:0:becomeActiveMaster-HFileCleaner.small.0-1733325686420 {}] cleaner.HFileCleaner(306): Exit Thread[master/645c2dbfef2e:0:becomeActiveMaster-HFileCleaner.small.0-1733325686420,5,FailOnTimeoutGroup]
2024-12-04T15:24:29,160 DEBUG [M:0;645c2dbfef2e:33167 {}] master.HMaster(1733): Stopping service threads
2024-12-04T15:24:29,160 INFO [M:0;645c2dbfef2e:33167 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-04T15:24:29,160 ERROR [M:0;645c2dbfef2e:33167 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-5,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup]
2024-12-04T15:24:29,161 INFO [M:0;645c2dbfef2e:33167 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-04T15:24:29,162 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-04T15:24:29,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-04T15:24:29,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-04T15:24:29,236 DEBUG [M:0;645c2dbfef2e:33167 {}] zookeeper.ZKUtil(347): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-04T15:24:29,236 WARN [M:0;645c2dbfef2e:33167 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-04T15:24:29,236 INFO [M:0;645c2dbfef2e:33167 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-04T15:24:29,236 INFO [M:0;645c2dbfef2e:33167 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-04T15:24:29,237 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-04T15:24:29,237 DEBUG [M:0;645c2dbfef2e:33167 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-04T15:24:29,237 INFO [M:0;645c2dbfef2e:33167 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-04T15:24:29,237 DEBUG [M:0;645c2dbfef2e:33167 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-04T15:24:29,237 DEBUG [M:0;645c2dbfef2e:33167 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-04T15:24:29,237 DEBUG [M:0;645c2dbfef2e:33167 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-04T15:24:29,237 INFO [M:0;645c2dbfef2e:33167 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=803.96 KB heapSize=991.64 KB
2024-12-04T15:24:29,260 DEBUG [M:0;645c2dbfef2e:33167 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/03a62467974d4765bd192271d0897dd0 is 82, key is hbase:meta,,1/info:regioninfo/1733325687820/Put/seqid=0
2024-12-04T15:24:29,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742586_1762 (size=5672)
2024-12-04T15:24:29,264 INFO [M:0;645c2dbfef2e:33167 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2330 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/03a62467974d4765bd192271d0897dd0
2024-12-04T15:24:29,294 DEBUG [M:0;645c2dbfef2e:33167 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/55f7b88320e6450f9e1930e5c0256131 is 2284, key is \x00\x00\x00\x00\x00\x00\x00(/proc:d/1733325716761/Put/seqid=0
2024-12-04T15:24:29,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742587_1763 (size=44071)
2024-12-04T15:24:29,299 INFO [M:0;645c2dbfef2e:33167 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=803.40 KB at sequenceid=2330 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/55f7b88320e6450f9e1930e5c0256131
2024-12-04T15:24:29,302 INFO [M:0;645c2dbfef2e:33167 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 55f7b88320e6450f9e1930e5c0256131
2024-12-04T15:24:29,326 DEBUG [M:0;645c2dbfef2e:33167 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dfc5bb1488874e6699bdbf078d8dcba9 is 69, key is 645c2dbfef2e,42169,1733325683856/rs:state/1733325686549/Put/seqid=0
2024-12-04T15:24:29,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073742588_1764 (size=5156)
2024-12-04T15:24:29,330 INFO [M:0;645c2dbfef2e:33167 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2330 (bloomFilter=true), to=hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dfc5bb1488874e6699bdbf078d8dcba9
2024-12-04T15:24:29,334 DEBUG [M:0;645c2dbfef2e:33167 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/03a62467974d4765bd192271d0897dd0 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/03a62467974d4765bd192271d0897dd0
2024-12-04T15:24:29,337 INFO [M:0;645c2dbfef2e:33167 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/03a62467974d4765bd192271d0897dd0, entries=8, sequenceid=2330, filesize=5.5 K
2024-12-04T15:24:29,338 DEBUG [M:0;645c2dbfef2e:33167 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/55f7b88320e6450f9e1930e5c0256131 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/55f7b88320e6450f9e1930e5c0256131
2024-12-04T15:24:29,341 INFO [M:0;645c2dbfef2e:33167 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 55f7b88320e6450f9e1930e5c0256131
2024-12-04T15:24:29,341 INFO [M:0;645c2dbfef2e:33167 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/55f7b88320e6450f9e1930e5c0256131, entries=173, sequenceid=2330, filesize=43.0 K
2024-12-04T15:24:29,341 DEBUG [M:0;645c2dbfef2e:33167 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dfc5bb1488874e6699bdbf078d8dcba9 as hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dfc5bb1488874e6699bdbf078d8dcba9
2024-12-04T15:24:29,344 INFO [M:0;645c2dbfef2e:33167 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38975/user/jenkins/test-data/2e8c7a4f-aa79-e6a2-94cc-4ce2ce52729c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dfc5bb1488874e6699bdbf078d8dcba9, entries=1, sequenceid=2330, filesize=5.0 K
2024-12-04T15:24:29,349 INFO [M:0;645c2dbfef2e:33167 {}] regionserver.HRegion(3040): Finished flush of dataSize ~803.96 KB/823255, heapSize ~991.34 KB/1015136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=2330, compaction requested=false
2024-12-04T15:24:29,356 INFO [M:0;645c2dbfef2e:33167 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-04T15:24:29,356 DEBUG [M:0;645c2dbfef2e:33167 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-04T15:24:29,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35183 is added to blk_1073741830_1006 (size=975920)
2024-12-04T15:24:29,365 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-04T15:24:29,366 INFO [M:0;645c2dbfef2e:33167 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-04T15:24:29,366 INFO [M:0;645c2dbfef2e:33167 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33167
2024-12-04T15:24:29,440 DEBUG [M:0;645c2dbfef2e:33167 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/645c2dbfef2e,33167,1733325683129 already deleted, retry=false
2024-12-04T15:24:29,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-04T15:24:29,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33167-0x1005d9eaf690000, quorum=127.0.0.1:55739, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-04T15:24:29,648 INFO [M:0;645c2dbfef2e:33167 {}] regionserver.HRegionServer(1307): Exiting; stopping=645c2dbfef2e,33167,1733325683129; zookeeper connection closed.
2024-12-04T15:24:29,702 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@646fdc50{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-04T15:24:29,705 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22319d81{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-04T15:24:29,705 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-04T15:24:29,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2da77e5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-04T15:24:29,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13c4bd01{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/hadoop.log.dir/,STOPPED}
2024-12-04T15:24:29,713 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-04T15:24:29,713 WARN [BP-1019263298-172.17.0.2-1733325680011 heartbeating to localhost/127.0.0.1:38975 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-04T15:24:29,713 WARN [BP-1019263298-172.17.0.2-1733325680011 heartbeating to localhost/127.0.0.1:38975 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1019263298-172.17.0.2-1733325680011 (Datanode Uuid b02f84e7-d7eb-4d71-b2d2-cf5c4ca9a4cf) service to localhost/127.0.0.1:38975
2024-12-04T15:24:29,713 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-04T15:24:29,715 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/cluster_56e0ac68-583d-08f0-32b8-9e10e2ab879d/dfs/data/data1/current/BP-1019263298-172.17.0.2-1733325680011 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T15:24:29,716 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/cluster_56e0ac68-583d-08f0-32b8-9e10e2ab879d/dfs/data/data2/current/BP-1019263298-172.17.0.2-1733325680011 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T15:24:29,716 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-04T15:24:29,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@38fd023f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-04T15:24:29,726 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@509a85ef{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-04T15:24:29,726 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-04T15:24:29,726 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@12fea530{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-04T15:24:29,726 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@644d9c1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94ca98ef-de24-dd2e-72da-43814579e3bf/hadoop.log.dir/,STOPPED}
2024-12-04T15:24:29,753 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-04T15:24:29,995 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down